2024-12-12 22:34:26,755 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-12-12 22:34:26,774 main DEBUG Took 0.013796 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-12 22:34:26,780 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-12 22:34:26,780 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-12 22:34:26,783 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-12 22:34:26,785 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,797 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-12 22:34:26,820 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,828 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,829 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,829 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,830 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,830 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,831 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,832 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,832 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,834 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,835 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,835 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,837 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,838 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,839 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,839 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,839 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,841 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,841 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,842 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,842 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-12 22:34:26,842 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,843 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-12 22:34:26,845 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-12 22:34:26,847 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-12 22:34:26,850 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-12 22:34:26,851 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-12 22:34:26,853 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-12 22:34:26,853 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-12 22:34:26,865 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-12 22:34:26,869 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-12 22:34:26,872 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-12 22:34:26,872 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-12 22:34:26,873 main DEBUG createAppenders(={Console})
2024-12-12 22:34:26,874 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized
2024-12-12 22:34:26,875 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9
2024-12-12 22:34:26,875 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK.
2024-12-12 22:34:26,876 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-12 22:34:26,877 main DEBUG OutputStream closed
2024-12-12 22:34:26,877 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-12 22:34:26,877 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-12 22:34:26,878 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK
2024-12-12 22:34:26,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-12 22:34:26,970 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-12 22:34:26,971 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-12 22:34:26,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-12 22:34:26,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-12 22:34:26,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-12 22:34:26,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-12 22:34:26,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-12 22:34:26,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-12 22:34:26,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-12 22:34:26,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-12 22:34:26,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-12 22:34:26,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-12 22:34:26,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-12 22:34:26,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-12 22:34:26,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-12 22:34:26,977 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-12 22:34:26,978 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-12 22:34:26,981 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12 22:34:26,981 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null
2024-12-12 22:34:26,982 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-12 22:34:26,983 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK.
2024-12-12T22:34:27,260 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00
2024-12-12 22:34:27,263 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-12 22:34:27,264 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-12T22:34:27,274 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins
2024-12-12T22:34:27,296 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-12T22:34:27,300 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912, deleteOnExit=true
2024-12-12T22:34:27,301 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-12T22:34:27,301 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/test.cache.data in system properties and HBase conf
2024-12-12T22:34:27,302 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.tmp.dir in system properties and HBase conf
2024-12-12T22:34:27,303 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.log.dir in system properties and HBase conf
2024-12-12T22:34:27,304 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-12T22:34:27,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-12T22:34:27,305 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-12T22:34:27,427 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-12T22:34:27,563 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-12T22:34:27,568 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-12T22:34:27,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-12T22:34:27,569 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-12T22:34:27,570 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T22:34:27,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-12T22:34:27,571 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-12T22:34:27,572 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-12T22:34:27,572 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T22:34:27,572 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-12T22:34:27,573 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/nfs.dump.dir in system properties and HBase conf
2024-12-12T22:34:27,573 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/java.io.tmpdir in system properties and HBase conf
2024-12-12T22:34:27,574 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-12T22:34:27,574 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-12T22:34:27,575 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-12T22:34:28,812 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-12T22:34:28,892 INFO [Time-limited test {}] log.Log(170): Logging initialized @3148ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-12T22:34:28,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T22:34:29,022 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T22:34:29,040 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T22:34:29,041 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T22:34:29,042 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-12T22:34:29,059 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T22:34:29,064 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.log.dir/,AVAILABLE}
2024-12-12T22:34:29,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T22:34:29,273 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/java.io.tmpdir/jetty-localhost-39329-hadoop-hdfs-3_4_1-tests_jar-_-any-17949631798127408317/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T22:34:29,279 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:39329}
2024-12-12T22:34:29,280 INFO [Time-limited test {}] server.Server(415): Started @3537ms
2024-12-12T22:34:29,922 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-12T22:34:29,931 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-12T22:34:29,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-12T22:34:29,932 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-12T22:34:29,933 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-12T22:34:29,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.log.dir/,AVAILABLE}
2024-12-12T22:34:29,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-12T22:34:30,039 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/java.io.tmpdir/jetty-localhost-37929-hadoop-hdfs-3_4_1-tests_jar-_-any-10800083864318405189/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T22:34:30,040 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:37929}
2024-12-12T22:34:30,040 INFO [Time-limited test {}] server.Server(415): Started @4297ms
2024-12-12T22:34:30,092 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-12T22:34:31,017 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/dfs/data/data2/current/BP-1705391202-172.17.0.2-1734042868369/current, will proceed with Du for space computation calculation,
2024-12-12T22:34:31,017 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/dfs/data/data1/current/BP-1705391202-172.17.0.2-1734042868369/current, will proceed with Du for space computation calculation,
2024-12-12T22:34:31,099 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-12T22:34:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4045e025e7f46993 with lease ID 0x2e6cbea64c9a9d41: Processing first storage report for DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d from datanode DatanodeRegistration(127.0.0.1:43795, datanodeUuid=348a55b3-053a-4753-a6e7-a80f38e05dff, infoPort=42915, infoSecurePort=0, ipcPort=38373, storageInfo=lv=-57;cid=testClusterID;nsid=189680365;c=1734042868369)
2024-12-12T22:34:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4045e025e7f46993 with lease ID 0x2e6cbea64c9a9d41: from storage DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d node DatanodeRegistration(127.0.0.1:43795, datanodeUuid=348a55b3-053a-4753-a6e7-a80f38e05dff, infoPort=42915, infoSecurePort=0, ipcPort=38373, storageInfo=lv=-57;cid=testClusterID;nsid=189680365;c=1734042868369), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-12T22:34:31,184 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4045e025e7f46993 with lease ID 0x2e6cbea64c9a9d41: Processing first storage report for DS-a4a19485-9e19-49a5-8ac5-371157b027da from datanode DatanodeRegistration(127.0.0.1:43795, datanodeUuid=348a55b3-053a-4753-a6e7-a80f38e05dff, infoPort=42915, infoSecurePort=0, ipcPort=38373, storageInfo=lv=-57;cid=testClusterID;nsid=189680365;c=1734042868369)
2024-12-12T22:34:31,184 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4045e025e7f46993 with lease ID 0x2e6cbea64c9a9d41: from storage DS-a4a19485-9e19-49a5-8ac5-371157b027da node DatanodeRegistration(127.0.0.1:43795, datanodeUuid=348a55b3-053a-4753-a6e7-a80f38e05dff, infoPort=42915, infoSecurePort=0, ipcPort=38373, storageInfo=lv=-57;cid=testClusterID;nsid=189680365;c=1734042868369), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-12T22:34:31,239 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00
2024-12-12T22:34:31,489 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/zookeeper_0, clientPort=50645, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-12T22:34:31,516 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=50645
2024-12-12T22:34:31,530 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:31,535 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:31,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741825_1001 (size=7)
2024-12-12T22:34:32,333 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc with version=8
2024-12-12T22:34:32,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/hbase-staging
2024-12-12T22:34:32,484 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-12T22:34:32,801 INFO [Time-limited test {}] client.ConnectionUtils(129): master/1aef280cf0a8:0 server-side Connection retries=45
2024-12-12T22:34:32,825 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:32,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:32,826 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T22:34:32,826 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:32,827 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T22:34:32,982 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T22:34:33,064 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-12T22:34:33,077 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-12T22:34:33,082 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T22:34:33,122 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 45641 (auto-detected)
2024-12-12T22:34:33,123 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-12T22:34:33,160 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35059
2024-12-12T22:34:33,174 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:33,177 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:33,202 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35059 connecting to ZooKeeper ensemble=127.0.0.1:50645
2024-12-12T22:34:33,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350590x0, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T22:34:33,381 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35059-0x1001c6182dc0000 connected
2024-12-12T22:34:33,483 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T22:34:33,489 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T22:34:33,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T22:34:33,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35059
2024-12-12T22:34:33,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35059
2024-12-12T22:34:33,504 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35059
2024-12-12T22:34:33,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35059
2024-12-12T22:34:33,508 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35059
2024-12-12T22:34:33,515 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc, hbase.cluster.distributed=false
2024-12-12T22:34:33,577 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/1aef280cf0a8:0 server-side Connection retries=45
2024-12-12T22:34:33,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:33,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:33,578 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-12T22:34:33,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-12T22:34:33,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-12T22:34:33,580 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-12T22:34:33,583 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-12T22:34:33,584 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36025
2024-12-12T22:34:33,586 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-12T22:34:33,592 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-12-12T22:34:33,594 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:33,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:33,603 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36025 connecting to ZooKeeper ensemble=127.0.0.1:50645
2024-12-12T22:34:33,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360250x0, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-12T22:34:33,615 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360250x0, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-12T22:34:33,615 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36025-0x1001c6182dc0001 connected
2024-12-12T22:34:33,617 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-12T22:34:33,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-12T22:34:33,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36025
2024-12-12T22:34:33,619 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36025
2024-12-12T22:34:33,620 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36025
2024-12-12T22:34:33,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36025
2024-12-12T22:34:33,621 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36025
2024-12-12T22:34:33,625 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/1aef280cf0a8,35059,1734042872477
2024-12-12T22:34:33,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T22:34:33,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T22:34:33,642 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/1aef280cf0a8,35059,1734042872477
2024-12-12T22:34:33,648 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;1aef280cf0a8:35059
2024-12-12T22:34:33,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-12T22:34:33,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master
2024-12-12T22:34:33,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T22:34:33,671 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T22:34:33,672 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-12T22:34:33,673 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/1aef280cf0a8,35059,1734042872477 from backup master directory
2024-12-12T22:34:33,673 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on existing znode=/hbase/master
2024-12-12T22:34:33,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/1aef280cf0a8,35059,1734042872477
2024-12-12T22:34:33,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T22:34:33,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters
2024-12-12T22:34:33,681 WARN [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-12-12T22:34:33,682 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=1aef280cf0a8,35059,1734042872477
2024-12-12T22:34:33,684 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0
2024-12-12T22:34:33,685 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0
2024-12-12T22:34:33,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741826_1002 (size=42)
2024-12-12T22:34:33,763 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/hbase.id with ID: 5ca99a12-1e5d-4f32-80c9-306c3fa84b48
2024-12-12T22:34:33,825 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-12T22:34:33,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T22:34:33,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-12T22:34:34,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741827_1003 (size=196)
2024-12-12T22:34:34,022 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-12T22:34:34,024 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-12T22:34:34,043 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-12T22:34:34,048 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-12T22:34:34,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741828_1004 (size=1189)
2024-12-12T22:34:34,107 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store
2024-12-12T22:34:34,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741829_1005 (size=34)
2024-12-12T22:34:34,550 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-12T22:34:34,551 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-12T22:34:34,553 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-12T22:34:34,553 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:34:34,553 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:34:34,553 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-12T22:34:34,554 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:34:34,554 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:34:34,554 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T22:34:34,557 WARN [master/1aef280cf0a8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/.initializing
2024-12-12T22:34:34,558 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/WALs/1aef280cf0a8,35059,1734042872477
2024-12-12T22:34:34,571 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName
2024-12-12T22:34:34,598 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1aef280cf0a8%2C35059%2C1734042872477, suffix=, logDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/WALs/1aef280cf0a8,35059,1734042872477, archiveDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/oldWALs, maxLogs=10
2024-12-12T22:34:34,645 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/WALs/1aef280cf0a8,35059,1734042872477/1aef280cf0a8%2C35059%2C1734042872477.1734042874607, exclude list is [], retry=0
2024-12-12T22:34:34,689 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43795,DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d,DISK]
2024-12-12T22:34:34,694 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf.
2024-12-12T22:34:34,761 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/WALs/1aef280cf0a8,35059,1734042872477/1aef280cf0a8%2C35059%2C1734042872477.1734042874607
2024-12-12T22:34:34,762 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42915:42915)]
2024-12-12T22:34:34,763 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}
2024-12-12T22:34:34,766 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-12T22:34:34,776 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,777 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,940 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info
2024-12-12T22:34:34,944 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T22:34:34,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-12-12T22:34:34,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,958 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc
2024-12-12T22:34:34,958 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T22:34:34,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-12T22:34:34,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,966 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs
2024-12-12T22:34:34,967 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T22:34:34,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-12T22:34:34,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,977 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state
2024-12-12T22:34:34,977 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T22:34:34,981 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-12T22:34:34,987 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:34,989 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:35,001 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-12-12T22:34:35,006 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682
2024-12-12T22:34:35,016 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-12-12T22:34:35,017 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60245342, jitterRate=-0.10227444767951965}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-12-12T22:34:35,022 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T22:34:35,024 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4
2024-12-12T22:34:35,057 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e364b9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-12T22:34:35,104 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating...
2024-12-12T22:34:35,120 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-12T22:34:35,120 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-12T22:34:35,124 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-12T22:34:35,126 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 2 msec 2024-12-12T22:34:35,134 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec 2024-12-12T22:34:35,134 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-12T22:34:35,173 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-12T22:34:35,189 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-12T22:34:35,205 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-12T22:34:35,209 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-12T22:34:35,211 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-12T22:34:35,232 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-12T22:34:35,235 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-12T22:34:35,251 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-12T22:34:35,267 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-12T22:34:35,269 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-12T22:34:35,281 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-12T22:34:35,300 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-12T22:34:35,307 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-12T22:34:35,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T22:34:35,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-12T22:34:35,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,328 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=1aef280cf0a8,35059,1734042872477, sessionid=0x1001c6182dc0000, setting cluster-up flag (Was=false) 2024-12-12T22:34:35,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,463 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-12T22:34:35,476 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1aef280cf0a8,35059,1734042872477 2024-12-12T22:34:35,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:35,570 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-12T22:34:35,593 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=1aef280cf0a8,35059,1734042872477 2024-12-12T22:34:35,672 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;1aef280cf0a8:36025 2024-12-12T22:34:35,675 INFO 
[RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1008): ClusterId : 5ca99a12-1e5d-4f32-80c9-306c3fa84b48 2024-12-12T22:34:35,680 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-12T22:34:35,693 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-12T22:34:35,693 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-12T22:34:35,712 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-12T22:34:35,712 DEBUG [RS:0;1aef280cf0a8:36025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d690fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:35,729 DEBUG [RS:0;1aef280cf0a8:36025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774c8a3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1aef280cf0a8/172.17.0.2:0 2024-12-12T22:34:35,733 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-12T22:34:35,734 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-12T22:34:35,734 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-12T22:34:35,744 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(3073): reportForDuty to master=1aef280cf0a8,35059,1734042872477 with isa=1aef280cf0a8/172.17.0.2:36025, startcode=1734042873576 2024-12-12T22:34:35,774 DEBUG [RS:0;1aef280cf0a8:36025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T22:34:35,782 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-12T22:34:35,795 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-12T22:34:35,813 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-12T22:34:35,820 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 1aef280cf0a8,35059,1734042872477 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-12T22:34:35,831 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/1aef280cf0a8:0, corePoolSize=5, maxPoolSize=5 2024-12-12T22:34:35,832 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/1aef280cf0a8:0, corePoolSize=5, maxPoolSize=5 2024-12-12T22:34:35,832 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/1aef280cf0a8:0, corePoolSize=5, maxPoolSize=5 2024-12-12T22:34:35,832 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/1aef280cf0a8:0, corePoolSize=5, maxPoolSize=5 2024-12-12T22:34:35,832 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/1aef280cf0a8:0, corePoolSize=10, maxPoolSize=10 2024-12-12T22:34:35,833 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:35,833 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/1aef280cf0a8:0, corePoolSize=2, maxPoolSize=2 2024-12-12T22:34:35,833 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:35,883 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T22:34:35,893 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-12T22:34:35,896 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39165, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T22:34:35,904 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734042905904 2024-12-12T22:34:35,904 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35059 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:35,906 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-12T22:34:35,907 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-12T22:34:35,921 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:35,921 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T22:34:35,934 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-12T22:34:35,935 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-12T22:34:35,936 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-12T22:34:35,936 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-12T22:34:35,953 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-12T22:34:35,955 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-12T22:34:35,956 WARN [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-12T22:34:35,964 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-12T22:34:35,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741831_1007 (size=1039) 2024-12-12T22:34:35,965 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-12T22:34:35,966 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-12T22:34:35,974 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-12T22:34:35,974 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-12T22:34:35,980 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-12T22:34:35,981 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:34:35,984 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.large.0-1734042875976,5,FailOnTimeoutGroup] 2024-12-12T22:34:35,990 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.small.0-1734042875984,5,FailOnTimeoutGroup] 2024-12-12T22:34:35,990 INFO 
[master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:35,990 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-12T22:34:35,992 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:35,993 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741832_1008 (size=32) 2024-12-12T22:34:36,057 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(3073): reportForDuty to master=1aef280cf0a8,35059,1734042872477 with isa=1aef280cf0a8/172.17.0.2:36025, startcode=1734042873576 2024-12-12T22:34:36,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35059 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,061 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35059 {}] master.ServerManager(486): Registering regionserver=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,069 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:34:36,069 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:41151 2024-12-12T22:34:36,070 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-12T22:34:36,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T22:34:36,081 DEBUG [RS:0;1aef280cf0a8:36025 {}] zookeeper.ZKUtil(111): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,081 WARN [RS:0;1aef280cf0a8:36025 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-12T22:34:36,081 INFO [RS:0;1aef280cf0a8:36025 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T22:34:36,082 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,083 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [1aef280cf0a8,36025,1734042873576] 2024-12-12T22:34:36,095 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-12T22:34:36,105 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-12T22:34:36,116 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-12T22:34:36,119 INFO [RS:0;1aef280cf0a8:36025 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-12T22:34:36,120 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,120 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-12T22:34:36,126 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-12T22:34:36,126 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,126 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,126 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/1aef280cf0a8:0, corePoolSize=2, maxPoolSize=2 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,127 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/1aef280cf0a8:0, corePoolSize=1, maxPoolSize=1 2024-12-12T22:34:36,128 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/1aef280cf0a8:0, corePoolSize=3, maxPoolSize=3 2024-12-12T22:34:36,128 DEBUG [RS:0;1aef280cf0a8:36025 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0, corePoolSize=3, maxPoolSize=3 2024-12-12T22:34:36,128 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,129 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,129 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,129 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,129 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,36025,1734042873576-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-12T22:34:36,153 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-12T22:34:36,155 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,36025,1734042873576-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:36,173 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.Replication(204): 1aef280cf0a8,36025,1734042873576 started 2024-12-12T22:34:36,173 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1767): Serving as 1aef280cf0a8,36025,1734042873576, RpcServer on 1aef280cf0a8/172.17.0.2:36025, sessionid=0x1001c6182dc0001 2024-12-12T22:34:36,174 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-12T22:34:36,174 DEBUG [RS:0;1aef280cf0a8:36025 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,174 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1aef280cf0a8,36025,1734042873576' 2024-12-12T22:34:36,174 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-12T22:34:36,175 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-12T22:34:36,176 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-12T22:34:36,176 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-12T22:34:36,176 DEBUG [RS:0;1aef280cf0a8:36025 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,176 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '1aef280cf0a8,36025,1734042873576' 2024-12-12T22:34:36,176 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-12T22:34:36,177 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-12T22:34:36,177 DEBUG [RS:0;1aef280cf0a8:36025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-12T22:34:36,177 INFO [RS:0;1aef280cf0a8:36025 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-12T22:34:36,178 INFO [RS:0;1aef280cf0a8:36025 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-12T22:34:36,284 INFO [RS:0;1aef280cf0a8:36025 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-12T22:34:36,287 INFO [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1aef280cf0a8%2C36025%2C1734042873576, suffix=, logDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576, archiveDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/oldWALs, maxLogs=32 2024-12-12T22:34:36,302 DEBUG [RS:0;1aef280cf0a8:36025 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576/1aef280cf0a8%2C36025%2C1734042873576.1734042876289, exclude list is [], retry=0 2024-12-12T22:34:36,307 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43795,DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d,DISK] 2024-12-12T22:34:36,310 INFO [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576/1aef280cf0a8%2C36025%2C1734042873576.1734042876289 2024-12-12T22:34:36,311 DEBUG [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42915:42915)] 2024-12-12T22:34:36,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:36,446 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T22:34:36,450 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T22:34:36,451 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:36,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:36,453 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T22:34:36,457 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T22:34:36,457 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:36,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:36,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T22:34:36,464 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T22:34:36,465 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:36,468 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:36,470 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740 2024-12-12T22:34:36,471 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740 2024-12-12T22:34:36,475 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table 
hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:34:36,482 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T22:34:36,487 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:34:36,488 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74327237, jitterRate=0.10756213963031769}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:34:36,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T22:34:36,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T22:34:36,491 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T22:34:36,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T22:34:36,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T22:34:36,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T22:34:36,493 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T22:34:36,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T22:34:36,497 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-12T22:34:36,497 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-12T22:34:36,505 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-12T22:34:36,517 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-12T22:34:36,520 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-12T22:34:36,673 DEBUG [1aef280cf0a8:35059 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-12T22:34:36,679 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,686 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1aef280cf0a8,36025,1734042873576, state=OPENING 2024-12-12T22:34:36,714 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-12T22:34:36,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:36,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:36,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T22:34:36,723 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T22:34:36,725 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:34:36,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:36,922 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-12T22:34:36,940 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-12T22:34:36,980 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-12T22:34:36,981 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-12T22:34:36,982 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-12T22:34:37,008 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=1aef280cf0a8%2C36025%2C1734042873576.meta, suffix=.meta, logDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576, archiveDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/oldWALs, maxLogs=32 2024-12-12T22:34:37,045 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576/1aef280cf0a8%2C36025%2C1734042873576.meta.1734042877010.meta, exclude list is [], retry=0 2024-12-12T22:34:37,067 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43795,DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d,DISK] 2024-12-12T22:34:37,078 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/WALs/1aef280cf0a8,36025,1734042873576/1aef280cf0a8%2C36025%2C1734042873576.meta.1734042877010.meta 2024-12-12T22:34:37,079 DEBUG 
[RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42915:42915)] 2024-12-12T22:34:37,080 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:34:37,081 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-12T22:34:37,153 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-12T22:34:37,158 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-12T22:34:37,164 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-12T22:34:37,164 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:37,164 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-12T22:34:37,164 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-12T22:34:37,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-12T22:34:37,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-12T22:34:37,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:37,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:37,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-12T22:34:37,172 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-12T22:34:37,172 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:37,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:37,173 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-12T22:34:37,174 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-12T22:34:37,174 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:37,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-12T22:34:37,177 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740 2024-12-12T22:34:37,180 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, 
pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740 2024-12-12T22:34:37,183 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:34:37,185 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-12T22:34:37,187 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62494616, jitterRate=-0.06875765323638916}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:34:37,188 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-12T22:34:37,194 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734042876909 2024-12-12T22:34:37,203 DEBUG [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-12T22:34:37,204 INFO [RS_OPEN_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-12T22:34:37,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:37,206 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 1aef280cf0a8,36025,1734042873576, state=OPEN 2024-12-12T22:34:37,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T22:34:37,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-12T22:34:37,246 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T22:34:37,246 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-12T22:34:37,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-12T22:34:37,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=1aef280cf0a8,36025,1734042873576 in 521 msec 2024-12-12T22:34:37,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-12T22:34:37,260 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 748 msec 2024-12-12T22:34:37,265 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5990 sec 2024-12-12T22:34:37,265 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734042877265, completionTime=-1 2024-12-12T22:34:37,265 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-12T22:34:37,266 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-12T22:34:37,297 DEBUG [hconnection-0x4fe961e3-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:37,300 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:37,308 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-12T22:34:37,308 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734042937308 2024-12-12T22:34:37,308 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734042997308 2024-12-12T22:34:37,308 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 42 msec 2024-12-12T22:34:37,341 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:37,342 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:37,342 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:37,345 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-1aef280cf0a8:35059, period=300000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:37,346 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-12T22:34:37,352 DEBUG [master/1aef280cf0a8:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-12T22:34:37,356 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-12-12T22:34:37,358 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-12T22:34:37,364 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-12T22:34:37,367 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:34:37,368 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:37,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:34:37,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741835_1011 (size=358) 2024-12-12T22:34:37,789 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 20f68c42b55d0d7b4a49ed486e40f5a4, NAME => 'hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:34:37,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741836_1012 (size=42) 2024-12-12T22:34:37,824 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:37,824 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 20f68c42b55d0d7b4a49ed486e40f5a4, disabling compactions & flushes 2024-12-12T22:34:37,824 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:37,824 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:37,825 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 
after waiting 0 ms 2024-12-12T22:34:37,825 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:37,825 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:37,825 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 20f68c42b55d0d7b4a49ed486e40f5a4: 2024-12-12T22:34:37,829 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:34:37,837 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734042877830"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042877830"}]},"ts":"1734042877830"} 2024-12-12T22:34:37,885 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T22:34:37,920 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:34:37,925 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042877921"}]},"ts":"1734042877921"} 2024-12-12T22:34:37,933 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-12T22:34:37,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=20f68c42b55d0d7b4a49ed486e40f5a4, ASSIGN}] 2024-12-12T22:34:37,976 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=20f68c42b55d0d7b4a49ed486e40f5a4, ASSIGN 2024-12-12T22:34:37,979 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=20f68c42b55d0d7b4a49ed486e40f5a4, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:34:38,130 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=20f68c42b55d0d7b4a49ed486e40f5a4, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:38,135 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 20f68c42b55d0d7b4a49ed486e40f5a4, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:34:38,291 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:38,300 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:38,300 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 20f68c42b55d0d7b4a49ed486e40f5a4, NAME => 'hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:34:38,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:38,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,301 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,317 INFO [StoreOpener-20f68c42b55d0d7b4a49ed486e40f5a4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,326 INFO [StoreOpener-20f68c42b55d0d7b4a49ed486e40f5a4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 20f68c42b55d0d7b4a49ed486e40f5a4 columnFamilyName info 2024-12-12T22:34:38,326 DEBUG [StoreOpener-20f68c42b55d0d7b4a49ed486e40f5a4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:38,328 INFO [StoreOpener-20f68c42b55d0d7b4a49ed486e40f5a4-1 {}] regionserver.HStore(327): Store=20f68c42b55d0d7b4a49ed486e40f5a4/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:34:38,332 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,339 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,348 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:34:38,358 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:34:38,363 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 20f68c42b55d0d7b4a49ed486e40f5a4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60473149, jitterRate=-0.0988798588514328}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-12T22:34:38,365 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 20f68c42b55d0d7b4a49ed486e40f5a4: 2024-12-12T22:34:38,368 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4., pid=6, masterSystemTime=1734042878291 2024-12-12T22:34:38,426 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:34:38,427 INFO [RS_OPEN_PRIORITY_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 
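The entries above show the hbase:namespace region being created and opened with a single 'info' family (VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)'). As a hedged illustration only, this is roughly how such a column family would be declared with the HBase 2.x client API; the class and table names here are placeholders, not the system table or the test's own code.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class NamespaceLikeDescriptor {
  public static TableDescriptor build() {
    // Mirrors the attributes printed in the log for the 'info' family:
    // VERSIONS => '10', IN_MEMORY => 'true', BLOCKSIZE => '8192'
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(10)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    // "example:namespace_like" is a hypothetical name, not hbase:namespace.
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "namespace_like"))
        .setColumnFamily(info)
        .build();
  }
}
```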
2024-12-12T22:34:38,429 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=20f68c42b55d0d7b4a49ed486e40f5a4, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:38,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-12T22:34:38,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 20f68c42b55d0d7b4a49ed486e40f5a4, server=1aef280cf0a8,36025,1734042873576 in 304 msec 2024-12-12T22:34:38,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-12T22:34:38,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=20f68c42b55d0d7b4a49ed486e40f5a4, ASSIGN in 479 msec 2024-12-12T22:34:38,463 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:34:38,463 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042878463"}]},"ts":"1734042878463"} 2024-12-12T22:34:38,466 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-12T22:34:38,486 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:34:38,488 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-12T22:34:38,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.1270 sec 2024-12-12T22:34:38,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-12T22:34:38,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:38,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:34:38,559 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-12T22:34:38,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T22:34:38,617 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 63 msec 2024-12-12T22:34:38,628 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-12T22:34:38,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-12T22:34:38,686 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 57 msec 2024-12-12T22:34:38,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-12T22:34:38,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-12T22:34:38,750 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.068sec 2024-12-12T22:34:38,752 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-12T22:34:38,754 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-12T22:34:38,755 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-12T22:34:38,756 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-12T22:34:38,756 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-12T22:34:38,757 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-12T22:34:38,758 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-12T22:34:38,773 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-12T22:34:38,774 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-12T22:34:38,775 INFO [master/1aef280cf0a8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=1aef280cf0a8,35059,1734042872477-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-12T22:34:38,777 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-12-12T22:34:38,778 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-12T22:34:38,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:38,812 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-12T22:34:38,812 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-12T22:34:38,827 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:38,838 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:38,849 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=1aef280cf0a8,35059,1734042872477 2024-12-12T22:34:38,864 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=218, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=899, ProcessCount=11, AvailableMemoryMB=6826 2024-12-12T22:34:38,882 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:34:38,901 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42518, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:34:38,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
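The TableDescriptorChecker warning above fires because the memstore flush size in effect for the table is 131072 bytes, far below the production default of 128 MB, so flushes will be triggered constantly (which is exactly what an ACID-guarantees test wants). A minimal sketch of what such an override could look like, assuming it is applied through the client/cluster Configuration; the exact mechanism the test uses is an assumption.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallFlushConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // 131072 bytes (128 KB) is the value TableDescriptorChecker reports in the log;
    // the default is 134217728 (128 MB), hence the "too small" warning.
    conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
    return conf;
  }
}
```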
2024-12-12T22:34:38,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:34:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:34:38,939 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:34:38,940 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:38,947 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:34:38,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-12T22:34:38,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:39,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741837_1013 (size=963) 2024-12-12T22:34:39,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:39,464 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:34:39,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741838_1014 (size=53) 2024-12-12T22:34:39,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:39,891 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:39,892 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3292c08f1e2fe18d3fcbb52f186614f5, disabling compactions & flushes 2024-12-12T22:34:39,892 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:39,892 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:39,892 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. after waiting 0 ms 2024-12-12T22:34:39,892 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:39,892 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
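The create request logged at 22:34:38,923 builds 'TestAcidGuarantees' with three column families A, B and C (VERSIONS => '1', BLOCKSIZE => '65536 B (64KB)') and the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'. A hedged sketch of an equivalent client-side descriptor follows; it is illustrative only, and the test itself may construct the table differently.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestAcidGuarantees {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // Table-level metadata shown in the log; selects the ADAPTIVE
              // in-memory compaction policy for the region's memstores.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        // VERSIONS => '1', BLOCKSIZE => '65536' as printed in the log.
        table.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)
            .setBlocksize(65536)
            .build());
      }
      admin.createTable(table.build());
    }
  }
}
```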
2024-12-12T22:34:39,892 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:39,895 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:34:39,896 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734042879896"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042879896"}]},"ts":"1734042879896"} 2024-12-12T22:34:39,901 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T22:34:39,903 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:34:39,904 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042879903"}]},"ts":"1734042879903"} 2024-12-12T22:34:39,908 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:34:39,997 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, ASSIGN}] 2024-12-12T22:34:40,000 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, ASSIGN 2024-12-12T22:34:40,002 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:34:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:40,153 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3292c08f1e2fe18d3fcbb52f186614f5, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:40,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:34:40,343 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:40,367 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:40,368 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:34:40,368 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,368 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:34:40,369 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,369 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,391 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,407 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:34:40,408 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3292c08f1e2fe18d3fcbb52f186614f5 columnFamilyName A 2024-12-12T22:34:40,408 DEBUG [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:40,417 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(327): Store=3292c08f1e2fe18d3fcbb52f186614f5/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:34:40,417 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,423 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:34:40,423 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3292c08f1e2fe18d3fcbb52f186614f5 columnFamilyName B 2024-12-12T22:34:40,424 DEBUG [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:40,431 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(327): Store=3292c08f1e2fe18d3fcbb52f186614f5/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:34:40,431 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,438 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:34:40,438 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3292c08f1e2fe18d3fcbb52f186614f5 columnFamilyName C 2024-12-12T22:34:40,439 DEBUG [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:34:40,444 INFO [StoreOpener-3292c08f1e2fe18d3fcbb52f186614f5-1 {}] regionserver.HStore(327): Store=3292c08f1e2fe18d3fcbb52f186614f5/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:34:40,445 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:40,447 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,452 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,482 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:34:40,486 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:40,527 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:34:40,531 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 3292c08f1e2fe18d3fcbb52f186614f5; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62809983, jitterRate=-0.064058318734169}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:34:40,534 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:40,540 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., pid=11, masterSystemTime=1734042880343 2024-12-12T22:34:40,560 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:40,560 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
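Each of the three stores above is opened with memstore type=CompactingMemStore and compactor=ADAPTIVE (in-memory flush threshold 2.00 MB, pipelineThreshold=2), the effect of the table-level property seen earlier. The same policy can also be declared per column family; a minimal hedged sketch, assuming the standard MemoryCompactionPolicy enum rather than anything specific to this test.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AdaptiveFamily {
  public static ColumnFamilyDescriptor build(String name) {
    // Per-family equivalent of the ADAPTIVE setting in the log: the store's
    // memstore becomes a CompactingMemStore using the adaptive compactor.
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}
```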
2024-12-12T22:34:40,564 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3292c08f1e2fe18d3fcbb52f186614f5, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:40,581 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-12T22:34:40,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 in 406 msec 2024-12-12T22:34:40,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-12T22:34:40,592 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, ASSIGN in 584 msec 2024-12-12T22:34:40,594 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:34:40,595 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042880595"}]},"ts":"1734042880595"} 2024-12-12T22:34:40,598 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:34:40,640 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:34:40,649 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.7160 sec 2024-12-12T22:34:41,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-12T22:34:41,122 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-12T22:34:41,129 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e67f019 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fcb5f29 2024-12-12T22:34:41,160 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fdf5682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,168 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,177 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,184 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:34:41,198 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45042, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:34:41,222 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5095ba91 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f2091cc 2024-12-12T22:34:41,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d38d10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,254 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-12T22:34:41,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72b32f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c43377 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18cb251d 2024-12-12T22:34:41,341 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,343 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-12T22:34:41,411 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbb5d8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,413 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a8f4734 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e52b42a 2024-12-12T22:34:41,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,477 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10c964e8 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9ed28bb 2024-12-12T22:34:41,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5cad1a, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-12T22:34:41,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,575 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x527c6d40 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@353bc462 2024-12-12T22:34:41,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@767a8485, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-12T22:34:41,689 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:34:41,717 DEBUG [hconnection-0x1edc7dcf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,718 DEBUG [hconnection-0x38bda5c0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,751 DEBUG [hconnection-0x53bc7585-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,760 DEBUG [hconnection-0x1b171e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,764 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:34:41,780 DEBUG [hconnection-0x1a036934-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-12T22:34:41,785 DEBUG [hconnection-0x54e848df-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:41,792 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:34:41,794 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:34:41,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:34:41,804 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,808 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49342, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,816 DEBUG [hconnection-0x2a80f7f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,822 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,841 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,847 DEBUG [hconnection-0x588b494f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,860 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49366, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,876 DEBUG [hconnection-0x426b28d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:34:41,880 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49374, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:41,920 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:41,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:34:41,944 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49382, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:34:41,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:41,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:41,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:41,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:41,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:41,961 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:42,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,003 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,101 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-12T22:34:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:42,111 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-12T22:34:42,112 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-12T22:34:42,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/43ffbb4e5e774c17bb5ed00a24ed9740 is 50, key is test_row_0/A:col10/1734042881894/Put/seqid=0 2024-12-12T22:34:42,259 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042942346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,384 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042942366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042942369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042942379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741839_1015 (size=12001) 2024-12-12T22:34:42,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042942409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,443 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,502 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042942499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042942500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042942514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042942514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042942549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,619 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,711 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:34:42,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042942725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042942726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042942729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,753 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042942729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:42,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042942766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,787 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/43ffbb4e5e774c17bb5ed00a24ed9740 2024-12-12T22:34:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:42,975 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:42,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:42,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:42,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:42,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:42,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:42,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/513462e1f8c4415c9c529cba4a908219 is 50, key is test_row_0/B:col10/1734042881894/Put/seqid=0 2024-12-12T22:34:43,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042943050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,058 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042943050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-12T22:34:43,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-12T22:34:43,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-12T22:34:43,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-12T22:34:43,068 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T22:34:43,068 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-12T22:34:43,069 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-12T22:34:43,069 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-12T22:34:43,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042943065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042943068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,081 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-12T22:34:43,081 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-12T22:34:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042943080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741840_1016 (size=12001) 2024-12-12T22:34:43,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/513462e1f8c4415c9c529cba4a908219 2024-12-12T22:34:43,149 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,156 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:43,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:43,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fe778f880dd64b9eb6ce6f9663c9524c is 50, key is test_row_0/C:col10/1734042881894/Put/seqid=0 2024-12-12T22:34:43,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741841_1017 (size=12001) 2024-12-12T22:34:43,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fe778f880dd64b9eb6ce6f9663c9524c 2024-12-12T22:34:43,316 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
as already flushing 2024-12-12T22:34:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/43ffbb4e5e774c17bb5ed00a24ed9740 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740 2024-12-12T22:34:43,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:34:43,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/513462e1f8c4415c9c529cba4a908219 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219 2024-12-12T22:34:43,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:34:43,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fe778f880dd64b9eb6ce6f9663c9524c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c 2024-12-12T22:34:43,478 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:34:43,479 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:43,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1541ms, sequenceid=12, compaction requested=false 2024-12-12T22:34:43,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:43,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:43,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:43,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:43,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:43,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/51475b4528894ac4aeaa79d4b6bf0825 is 50, key is test_row_0/A:col10/1734042882340/Put/seqid=0 2024-12-12T22:34:43,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741842_1018 (size=12001) 2024-12-12T22:34:43,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/51475b4528894ac4aeaa79d4b6bf0825 2024-12-12T22:34:43,804 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042943790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042943791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/413ef4c641124a92ad7b93e237aff552 is 50, key is test_row_0/B:col10/1734042882340/Put/seqid=0 2024-12-12T22:34:43,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042943802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,847 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042943802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042943816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:43,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741843_1019 (size=12001) 2024-12-12T22:34:43,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042943932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042943934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,957 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042943955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042943957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,965 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:43,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:43,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
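The Mutate calls rejected above with RegionTooBusyException ("Over memstore limit=512.0 K") show the region server refusing writes while the region's memstore is above its blocking limit and the flush is still in progress. Below is a minimal client-side sketch of tolerating that rejection with bounded retries; the row, column family, and backoff values are illustrative assumptions and not taken from the test harness:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempts = 0;
                while (true) {
                    try {
                        table.put(put); // rejected while the memstore is over its blocking limit
                        break;
                    } catch (IOException e) {
                        // Depending on client retry settings, RegionTooBusyException may surface
                        // directly or as the cause of a retries-exhausted exception.
                        boolean tooBusy = e instanceof RegionTooBusyException
                            || e.getCause() instanceof RegionTooBusyException;
                        if (!tooBusy || ++attempts >= 5) {
                            throw e;
                        }
                        Thread.sleep(200L * attempts); // simple linear backoff before retrying
                    }
                }
            }
        }
    }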
2024-12-12T22:34:43,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:43,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:43,969 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:43,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:43,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042943970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,124 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:44,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042944162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,186 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042944164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042944166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042944170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042944185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,279 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:44,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/413ef4c641124a92ad7b93e237aff552 2024-12-12T22:34:44,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/ac798b8565234e44b7e80af09015d1b2 is 50, key is test_row_0/C:col10/1734042882340/Put/seqid=0 2024-12-12T22:34:44,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
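The 512.0 K figure in these RegionTooBusyException messages is the region's memstore blocking limit, i.e. the configured memstore flush size multiplied by the block multiplier; once the per-family flushes above (A, B, and the pending C file) complete and free memstore space, writes are accepted again. The sketch below shows the two server-side settings involved, using an HBaseTestingUtility mini-cluster; the 128 KB flush size and multiplier of 4 are assumptions chosen only because they reproduce the 512 K limit seen in this log, not values read from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class SmallMemstoreCluster {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtility util = new HBaseTestingUtility();
            Configuration conf = util.getConfiguration();
            // Flush each region's memstore once it reaches ~128 KB ...
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // ... and block new writes (RegionTooBusyException) once it reaches
            // flush.size * multiplier = 512 KB, matching the limit reported above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            util.startMiniCluster();
            try {
                // run the write workload against the mini-cluster here
            } finally {
                util.shutdownMiniCluster();
            }
        }
    }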
2024-12-12T22:34:44,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741844_1020 (size=12001) 2024-12-12T22:34:44,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042944494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042944495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042944515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042944522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:44,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042944524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,611 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:44,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,771 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:44,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/ac798b8565234e44b7e80af09015d1b2 2024-12-12T22:34:44,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/51475b4528894ac4aeaa79d4b6bf0825 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825 2024-12-12T22:34:44,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T22:34:44,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/413ef4c641124a92ad7b93e237aff552 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552 2024-12-12T22:34:44,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:44,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:44,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:44,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:44,944 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:44,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T22:34:44,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/ac798b8565234e44b7e80af09015d1b2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2 2024-12-12T22:34:45,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042945012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042945011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2, entries=150, sequenceid=39, filesize=11.7 K 2024-12-12T22:34:45,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1494ms, sequenceid=39, compaction requested=false 2024-12-12T22:34:45,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:45,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:45,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:34:45,107 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,111 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:45,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:45,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/764749e6a9a84047b02697002d30a2e9 is 50, key is test_row_0/A:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:45,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741845_1021 (size=16681) 2024-12-12T22:34:45,267 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/764749e6a9a84047b02697002d30a2e9 2024-12-12T22:34:45,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,278 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:45,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:45,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e58508bec12b41d69930d1d7b986fa53 is 50, key is test_row_0/B:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:45,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741846_1022 (size=12001) 2024-12-12T22:34:45,439 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,449 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042945518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042945522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042945573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
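The RegionTooBusyException warnings above are raised in HRegion.checkResources when the region's memstore has grown past its blocking threshold; writes are rejected until the flush in progress frees space. That threshold is the per-region flush size multiplied by the block multiplier, so the 512.0 K limit seen here points at deliberately small, test-style settings (the stock defaults would give 128 MB x 4 = 512 MB). A minimal sketch of reading those two settings with the standard HBase configuration API; the 128 K figure mentioned in the comment is only an illustrative guess, not something this log states:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore flush threshold; the stock default is 128 MB.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    // Multiplier above which further writes get RegionTooBusyException; default 4.
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Writes block above " + (flushSize * multiplier) + " bytes per region");
    // A 512 K limit like the one logged here implies shrunken test settings,
    // e.g. a 128 K flush size with the default multiplier of 4 (an assumption).
  }
}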
2024-12-12T22:34:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
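Every pid=13 failure in this stretch follows the same loop: the master dispatches FlushRegionCallable to the region server, the region declines with "NOT flushing ... as already flushing", the callable surfaces that as "Unable to complete flush", and the master logs "Remote procedure failed, pid=13" and re-dispatches until the running flush completes. An explicit flush request through the Admin API is one way a master-driven flush like this gets started; a minimal sketch, assuming a cluster configuration on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask that every region of the table be flushed. In this log the
      // corresponding remote flush (pid=13) keeps failing with "already
      // flushing" and is re-dispatched until the in-progress flush finishes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}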
2024-12-12T22:34:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042945654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042945663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042945694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,786 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,812 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e58508bec12b41d69930d1d7b986fa53 2024-12-12T22:34:45,859 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/10812344abce4fe69064d55e0e886d59 is 50, key is test_row_0/C:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:45,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042945890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042945901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:45,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042945922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741847_1023 (size=12001) 2024-12-12T22:34:45,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/10812344abce4fe69064d55e0e886d59 2024-12-12T22:34:45,951 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:45,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:45,953 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:45,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/764749e6a9a84047b02697002d30a2e9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9 2024-12-12T22:34:46,036 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042946032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042946050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9, entries=250, sequenceid=51, filesize=16.3 K 2024-12-12T22:34:46,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e58508bec12b41d69930d1d7b986fa53 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53 2024-12-12T22:34:46,116 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
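From the client's point of view, each "Region is too busy" warning above becomes a rejected Mutate call (the ipc.CallRunner entries record the callId, deadline, and the RegionTooBusyException returned). The stock client normally retries this on its own, and depending on timeout and retry settings the rejection reaches the caller either as RegionTooBusyException or wrapped in RetriesExhaustedException once the client's own retries run out. A minimal sketch of an explicit backoff-and-retry loop, assuming the standard client API; the table, family "A", qualifier "col10", and row mirror the cells in this log, while the value and retry limits are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" mirror the cells seen in this log.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                        // accepted once the flush frees memstore space
        } catch (RegionTooBusyException | RetriesExhaustedException e) {
          if (attempt >= 10) {
            throw e;                    // give up after a bounded number of attempts
          }
          Thread.sleep(backoffMs);      // back off while the flush catches up
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}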
2024-12-12T22:34:46,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T22:34:46,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/10812344abce4fe69064d55e0e886d59 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59 2024-12-12T22:34:46,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T22:34:46,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1111ms, sequenceid=51, compaction requested=true 2024-12-12T22:34:46,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:46,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 
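The MemStoreFlusher entries show the flush finishing in two steps per store: each new HFile is first written under the region's .tmp directory ("Flushed memstore data size=... to=.../.tmp/B/...") and only then committed by moving it into the column-family directory and adding it to the store ("Committing .../.tmp/A/... as .../A/...", "Added ..."), so readers never observe a partially written file. The closing summary ("Finished flush of dataSize ~60.38 KB ... currentSize=147.60 KB ... compaction requested=true") also explains why the memstore limit kept tripping: roughly 147 KB of new edits arrived while the flush ran. A minimal sketch of the same write-to-temp-then-rename pattern against a Hadoop FileSystem; the paths and contents are placeholders, not the ones in this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path tmp = new Path("/tmp/example-region/.tmp/A/newfile");   // staging location
    Path dst = new Path("/tmp/example-region/A/newfile");        // visible store location
    fs.mkdirs(dst.getParent());
    // Step 1: write the complete file under .tmp, where readers of A/ never look.
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("hfile contents");
    }
    // Step 2: commit by renaming into the store directory. On HDFS the rename is
    // atomic, so the store sees either the whole file or nothing at all.
    fs.rename(tmp, dst);
  }
}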
2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:46,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:34:46,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-12T22:34:46,248 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:46,251 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:46,275 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
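With three flushed HFiles now sitting in each store, the flusher marks A, B and C for compaction and the compaction threads begin selecting candidates ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). The selection that follows is ratio-based: a candidate set is acceptable only when no single file dwarfs the combined size of the others. A simplified paraphrase of that "files in ratio" test, not the actual ExploringCompactionPolicy code; the 1.2 ratio is the default hbase.hstore.compaction.ratio and the sizes echo the ~12 K flush outputs in this log:

import java.util.List;

public class RatioCheckSketch {
  // Simplified form of the "files in ratio" test applied to a candidate set of
  // store files: every file must be no larger than ratio times the combined
  // size of the other files in the set.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Three similarly sized flush outputs easily pass the default ratio of 1.2,
    // so all three are compacted together, as in the selections logged below.
    System.out.println(filesInRatio(List.of(12_001L, 12_001L, 12_001L), 1.2));
  }
}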
2024-12-12T22:34:46,280 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:46,281 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:46,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:46,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:46,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:46,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:46,282 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:46,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:46,282 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:46,282 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,282 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=39.7 K 2024-12-12T22:34:46,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,286 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:46,287 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,287 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.2 K 2024-12-12T22:34:46,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042946273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,299 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 513462e1f8c4415c9c529cba4a908219, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042881894 2024-12-12T22:34:46,299 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43ffbb4e5e774c17bb5ed00a24ed9740, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042881894 2024-12-12T22:34:46,305 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 413ef4c641124a92ad7b93e237aff552, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734042882340 2024-12-12T22:34:46,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042946284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,308 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e58508bec12b41d69930d1d7b986fa53, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:46,310 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51475b4528894ac4aeaa79d4b6bf0825, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734042882340 2024-12-12T22:34:46,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042946300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,324 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/9b6884077a8049568a262c568ff60240 is 50, key is test_row_0/A:col10/1734042886237/Put/seqid=0 2024-12-12T22:34:46,328 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 764749e6a9a84047b02697002d30a2e9, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:46,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741848_1024 (size=12001) 2024-12-12T22:34:46,431 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042946403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042946411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042946429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,446 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#10 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:46,446 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,447 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/542a328dddf843c296998a52cebed323 is 50, key is test_row_0/B:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:46,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:46,451 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,468 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#11 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:46,469 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6956b55657d946339a366cda68de997e is 50, key is test_row_0/A:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:46,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741849_1025 (size=12104) 2024-12-12T22:34:46,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741850_1026 (size=12104) 2024-12-12T22:34:46,558 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/542a328dddf843c296998a52cebed323 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/542a328dddf843c296998a52cebed323 2024-12-12T22:34:46,612 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,613 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,613 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,617 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into 542a328dddf843c296998a52cebed323(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:46,617 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:46,617 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=13, startTime=1734042886247; duration=0sec 2024-12-12T22:34:46,618 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:46,618 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:46,618 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:46,634 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6956b55657d946339a366cda68de997e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6956b55657d946339a366cda68de997e 2024-12-12T22:34:46,641 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:46,641 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:46,641 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:46,641 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.2 K 2024-12-12T22:34:46,645 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting fe778f880dd64b9eb6ce6f9663c9524c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042881894 2024-12-12T22:34:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042946636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,649 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ac798b8565234e44b7e80af09015d1b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1734042882340 2024-12-12T22:34:46,660 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 10812344abce4fe69064d55e0e886d59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:46,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042946646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042946646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,709 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 6956b55657d946339a366cda68de997e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:46,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:46,710 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=13, startTime=1734042886213; duration=0sec 2024-12-12T22:34:46,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:46,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:46,772 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#12 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:46,773 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fb9a1aa4213145b0aa67de4b0d66af6e is 50, key is test_row_0/C:col10/1734042883612/Put/seqid=0 2024-12-12T22:34:46,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/9b6884077a8049568a262c568ff60240 2024-12-12T22:34:46,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741851_1027 (size=12104) 2024-12-12T22:34:46,884 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fb9a1aa4213145b0aa67de4b0d66af6e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fb9a1aa4213145b0aa67de4b0d66af6e 2024-12-12T22:34:46,913 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into fb9a1aa4213145b0aa67de4b0d66af6e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:46,913 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:46,913 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=13, startTime=1734042886247; duration=0sec 2024-12-12T22:34:46,913 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:46,913 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:46,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/91d6fe970f2249cdb0ff90ad424730ef is 50, key is test_row_0/B:col10/1734042886237/Put/seqid=0 2024-12-12T22:34:46,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:46,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:46,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:46,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:46,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:46,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042946957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042946980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:46,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:46,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042946988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741852_1028 (size=12001) 2024-12-12T22:34:47,041 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/91d6fe970f2249cdb0ff90ad424730ef 2024-12-12T22:34:47,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3299ee5b05ba4788a9156889094ff8f9 is 50, key is test_row_0/C:col10/1734042886237/Put/seqid=0 2024-12-12T22:34:47,100 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,100 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:47,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:47,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741853_1029 (size=12001) 2024-12-12T22:34:47,257 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:47,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,419 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:47,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:47,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:47,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042947470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:47,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042947515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,540 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=83 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3299ee5b05ba4788a9156889094ff8f9 2024-12-12T22:34:47,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:47,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042947532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/9b6884077a8049568a262c568ff60240 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240 2024-12-12T22:34:47,595 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,596 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:47,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:47,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
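The RegionTooBusyException entries above record the region server rejecting Mutate calls because this region's memstore has reached its blocking limit (512.0 K in this run) while a flush is still in progress. As a hedged illustration only (the log does not show how this test lowers the limit): in a stock HBase 2.x deployment the blocking threshold is the per-region flush size multiplied by the block multiplier, so a 512 K limit could come from a configuration like the sketch below, where the specific values are assumptions.

```java
// Illustrative sketch (assumption): one way a low 512 KB blocking limit, like the
// one logged above, could be configured for an HBase 2.x cluster or mini-cluster.
// The actual mechanism used by this TestAcidGuarantees run is not visible in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class LowMemstoreLimitConfig {
    public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB...
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // ...and block new writes (RegionTooBusyException) at 4x that size = 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}
```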
2024-12-12T22:34:47,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:47,624 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240, entries=150, sequenceid=83, filesize=11.7 K 2024-12-12T22:34:47,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/91d6fe970f2249cdb0ff90ad424730ef as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef 2024-12-12T22:34:47,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef, entries=150, sequenceid=83, filesize=11.7 K 2024-12-12T22:34:47,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3299ee5b05ba4788a9156889094ff8f9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9 2024-12-12T22:34:47,729 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9, entries=150, sequenceid=83, filesize=11.7 K 
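The pid=13 entries above show the master's remote flush procedure being redispatched and failing with "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 is still committing the A/B/C store files; once that flush finishes, a later dispatch of the same procedure succeeds (the sequenceid=90 flush that follows). Such a procedure is normally started by an explicit flush request; a minimal, hedged sketch of issuing one through the public Admin API is shown below, with the caller and table name assumed rather than taken from the log.

```java
// Hedged sketch: requesting a table flush through the HBase Admin API.
// Generic illustration only; the log does not show the exact caller that
// created procedure pid=13.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; on the region server
            // this arrives as a FlushRegionCallable like the ones logged above.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```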
2024-12-12T22:34:47,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1486ms, sequenceid=83, compaction requested=false 2024-12-12T22:34:47,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:47,759 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:47,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-12T22:34:47,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:47,769 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:47,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:47,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ca38a87ed5934bdb9101822d09a0366e is 50, key is test_row_0/A:col10/1734042886287/Put/seqid=0 2024-12-12T22:34:47,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741854_1030 (size=7315) 2024-12-12T22:34:48,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
as already flushing 2024-12-12T22:34:48,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:48,248 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ca38a87ed5934bdb9101822d09a0366e 2024-12-12T22:34:48,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e3fc55c2728d40b9a99fde0a907dd197 is 50, key is test_row_0/B:col10/1734042886287/Put/seqid=0 2024-12-12T22:34:48,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741855_1031 (size=7315) 2024-12-12T22:34:48,375 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e3fc55c2728d40b9a99fde0a907dd197 2024-12-12T22:34:48,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042948433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042948437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e19747da840e4e198d571133e35d48c6 is 50, key is test_row_0/C:col10/1734042886287/Put/seqid=0 2024-12-12T22:34:48,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042948486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741856_1032 (size=7315) 2024-12-12T22:34:48,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042948531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042948560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,569 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042948562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042948564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042948771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042948773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:48,922 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e19747da840e4e198d571133e35d48c6 2024-12-12T22:34:48,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ca38a87ed5934bdb9101822d09a0366e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e 2024-12-12T22:34:49,013 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e, entries=50, sequenceid=90, filesize=7.1 K 2024-12-12T22:34:49,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e3fc55c2728d40b9a99fde0a907dd197 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197 2024-12-12T22:34:49,065 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197, entries=50, sequenceid=90, filesize=7.1 K 2024-12-12T22:34:49,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e19747da840e4e198d571133e35d48c6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6 2024-12-12T22:34:49,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042949077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:49,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:49,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042949088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:49,114 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6, entries=50, sequenceid=90, filesize=7.1 K 2024-12-12T22:34:49,120 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=201.27 KB/206100 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1350ms, sequenceid=90, compaction requested=true 2024-12-12T22:34:49,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:49,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
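The RegionTooBusyException entries above come from HRegion.checkResources() while the region's memstore sits over its blocking limit (here 512.0 K) and the in-flight flush has not yet drained it; that limit is normally the configured memstore flush size scaled by hbase.hregion.memstore.block.multiplier. Below is a minimal client-side sketch (not part of the captured log) of riding out that window, assuming the exception propagates to the caller — depending on client retry settings it may instead surface wrapped in a retries-exhausted exception. Table, family and retry values are illustrative assumptions taken from the row/column names in this test.

// Minimal sketch, not from the test log: a writer backing off when the region
// rejects puts because its memstore is over the blocking limit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // rejected while the memstore is over the blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;    // give up after a few tries
          Thread.sleep(200L * attempt); // back off so the pending flush can drain the memstore
        }
      }
    }
  }
}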
2024-12-12T22:34:49,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-12T22:34:49,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-12T22:34:49,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-12T22:34:49,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 7.3530 sec 2024-12-12T22:34:49,195 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 7.4180 sec 2024-12-12T22:34:49,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:49,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-12-12T22:34:49,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:49,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:49,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:49,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:49,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:49,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:49,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:49,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042949605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:49,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:49,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042949590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:49,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/28306e0f5899446981adb14fd837a0b6 is 50, key is test_row_0/A:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:49,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741857_1033 (size=12001) 2024-12-12T22:34:49,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/28306e0f5899446981adb14fd837a0b6 2024-12-12T22:34:49,820 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/47e14be3a45249faba26494eb89a18c2 is 50, key is test_row_0/B:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:49,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741858_1034 (size=12001) 2024-12-12T22:34:49,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-12T22:34:49,932 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-12T22:34:49,938 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:34:49,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-12T22:34:49,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:49,946 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 
execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:34:49,958 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:34:49,959 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:34:50,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:50,119 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T22:34:50,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:50,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,122 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:50,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:50,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,289 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/47e14be3a45249faba26494eb89a18c2 2024-12-12T22:34:50,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T22:34:50,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:50,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
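The FlushTableProcedure above (pid=14) was requested by the test client; while the flush already running on the region is still in progress, each dispatched FlushRegionCallable bails out with "NOT flushing ... as already flushing" and the master re-dispatches pid=15. A minimal sketch of the admin call that starts such a procedure, assuming default client configuration:

// Minimal sketch, not taken from the test itself: the admin-side flush request
// that the master turns into FlushTableProcedure / FlushRegionProcedure.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the table-level flush procedure done,
      // mirroring the earlier "Operation: FLUSH ... procId: 12 completed" line.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}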
2024-12-12T22:34:50,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/09d1cf323c5e4d799dfb83bdd1452474 is 50, key is test_row_0/C:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:50,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741859_1035 (size=12001) 2024-12-12T22:34:50,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/09d1cf323c5e4d799dfb83bdd1452474 2024-12-12T22:34:50,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/28306e0f5899446981adb14fd837a0b6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6 2024-12-12T22:34:50,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T22:34:50,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/47e14be3a45249faba26494eb89a18c2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2 2024-12-12T22:34:50,445 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T22:34:50,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:50,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:50,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:50,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T22:34:50,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/09d1cf323c5e4d799dfb83bdd1452474 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474 2024-12-12T22:34:50,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T22:34:50,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for 3292c08f1e2fe18d3fcbb52f186614f5 in 888ms, sequenceid=123, compaction requested=true 2024-12-12T22:34:50,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:50,491 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:34:50,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 
2024-12-12T22:34:50,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:50,493 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:34:50,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:50,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:50,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:50,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:50,502 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43421 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:50,502 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:50,503 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43421 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:50,503 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:50,503 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:50,503 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/542a328dddf843c296998a52cebed323, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=42.4 K 2024-12-12T22:34:50,504 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:50,504 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6956b55657d946339a366cda68de997e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=42.4 K 2024-12-12T22:34:50,505 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6956b55657d946339a366cda68de997e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:50,505 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 542a328dddf843c296998a52cebed323, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:50,506 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 91d6fe970f2249cdb0ff90ad424730ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1734042886237 2024-12-12T22:34:50,507 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b6884077a8049568a262c568ff60240, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1734042886237 2024-12-12T22:34:50,508 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e3fc55c2728d40b9a99fde0a907dd197, keycount=50, 
bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734042886287 2024-12-12T22:34:50,509 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca38a87ed5934bdb9101822d09a0366e, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734042886287 2024-12-12T22:34:50,516 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28306e0f5899446981adb14fd837a0b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:50,516 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 47e14be3a45249faba26494eb89a18c2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:50,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:50,574 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:50,575 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/da3221900558410582680aefbb694ab0 is 50, key is test_row_0/B:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:50,602 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-12T22:34:50,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
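The compaction selections above ("Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking", with ExploringCompactionPolicy picking all 4 files of size 43421) are governed by per-store file-count thresholds. A minimal sketch of those knobs, with values chosen only for illustration (they match the usual defaults, not necessarily what this test sets):

// Minimal sketch, illustrative values only: the store-file thresholds behind
// "4 eligible, 16 blocking" in the selection lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A minor compaction becomes eligible once a store has at least this many files.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on how many files a single minor compaction may rewrite.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store reaches this many files, further flushes (and hence writes)
    // are delayed until compaction catches up -- the "16 blocking" figure above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}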
2024-12-12T22:34:50,604 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:34:50,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:50,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:50,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:50,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:50,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:50,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:50,617 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:50,618 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0bfea1f44be5459987b24c0bcd862bcf is 50, key is test_row_0/A:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:50,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:50,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
as already flushing 2024-12-12T22:34:50,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/532d6dde08ed4fe38dd8841fe5c3d566 is 50, key is test_row_0/A:col10/1734042890581/Put/seqid=0 2024-12-12T22:34:50,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741860_1036 (size=12241) 2024-12-12T22:34:50,665 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/da3221900558410582680aefbb694ab0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/da3221900558410582680aefbb694ab0 2024-12-12T22:34:50,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741861_1037 (size=12241) 2024-12-12T22:34:50,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741862_1038 (size=12101) 2024-12-12T22:34:50,708 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into da3221900558410582680aefbb694ab0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:50,708 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:50,708 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=12, startTime=1734042890493; duration=0sec 2024-12-12T22:34:50,709 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:50,709 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:50,709 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:34:50,728 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0bfea1f44be5459987b24c0bcd862bcf as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf 2024-12-12T22:34:50,733 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 43421 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:50,733 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:50,733 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:50,733 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fb9a1aa4213145b0aa67de4b0d66af6e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=42.4 K 2024-12-12T22:34:50,737 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting fb9a1aa4213145b0aa67de4b0d66af6e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042883612 2024-12-12T22:34:50,741 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3299ee5b05ba4788a9156889094ff8f9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1734042886237 2024-12-12T22:34:50,751 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e19747da840e4e198d571133e35d48c6, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734042886287 2024-12-12T22:34:50,767 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 09d1cf323c5e4d799dfb83bdd1452474, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:50,784 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 0bfea1f44be5459987b24c0bcd862bcf(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
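At this point the B store has been rewritten from four files (42.4 K) into a single 12.0 K file, the A store's selection is about to complete the same way, and the C compaction is in progress. A minimal sketch (not from the test) of requesting and observing a compaction from the client side, assuming the Admin API and the HBase 2.x CompactionState enum are available:

// Minimal sketch: requesting a major compaction and polling its state, the
// client-side counterpart of the CompactSplit activity in the entries above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactAndWaitExample {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);                                  // asynchronous request
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);                                        // poll until region servers report idle
      }
    }
  }
}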
2024-12-12T22:34:50,784 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:50,784 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=12, startTime=1734042890491; duration=0sec 2024-12-12T22:34:50,784 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:50,785 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:50,810 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#24 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:50,811 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/a7e19e2c254546aca5fd2a53448e4310 is 50, key is test_row_0/C:col10/1734042888422/Put/seqid=0 2024-12-12T22:34:50,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042950800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042950799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042950802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042950809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,830 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042950817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741863_1039 (size=12241) 2024-12-12T22:34:50,863 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/a7e19e2c254546aca5fd2a53448e4310 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/a7e19e2c254546aca5fd2a53448e4310 2024-12-12T22:34:50,881 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into a7e19e2c254546aca5fd2a53448e4310(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:50,881 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:50,881 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=12, startTime=1734042890494; duration=0sec 2024-12-12T22:34:50,881 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:50,881 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:50,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042950925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042950925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042950925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042950928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:50,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:50,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042950946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:51,086 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/532d6dde08ed4fe38dd8841fe5c3d566 2024-12-12T22:34:51,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042951133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042951134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042951139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/6c3951aa3653498588c85b2dd4825fa6 is 50, key is test_row_0/B:col10/1734042890581/Put/seqid=0 2024-12-12T22:34:51,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042951153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042951148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741864_1040 (size=12101) 2024-12-12T22:34:51,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042951477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042951480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042951481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042951482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042951488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:51,584 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/6c3951aa3653498588c85b2dd4825fa6 2024-12-12T22:34:51,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7623c66d0a9c49b6aae1b07a949c5947 is 50, key is test_row_0/C:col10/1734042890581/Put/seqid=0 2024-12-12T22:34:51,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741865_1041 (size=12101) 2024-12-12T22:34:51,665 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7623c66d0a9c49b6aae1b07a949c5947 2024-12-12T22:34:51,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/532d6dde08ed4fe38dd8841fe5c3d566 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566 2024-12-12T22:34:51,699 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T22:34:51,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/6c3951aa3653498588c85b2dd4825fa6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6 2024-12-12T22:34:51,720 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T22:34:51,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7623c66d0a9c49b6aae1b07a949c5947 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947 2024-12-12T22:34:51,743 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947, entries=150, sequenceid=132, filesize=11.8 K 2024-12-12T22:34:51,746 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1142ms, sequenceid=132, compaction requested=false 2024-12-12T22:34:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
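[Editor's note] The repeated Mutate rejections above are transient: writers only need to wait for the in-flight flush (pid=15) to drain the memstore. Below is a minimal client-side sketch of absorbing such rejections with retry and backoff, using the standard HBase Table/Put API; the row and column names mirror the log, but the manual retry loop is hypothetical (the stock client normally retries RegionTooBusyException internally on its own):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            int attempts = 0;
            while (true) {
                try {
                    table.put(put); // rejected while the memstore is over its blocking limit
                    break;
                } catch (RegionTooBusyException busy) {
                    // Sketch of the behaviour visible in the log: back off and try again
                    // until a flush/compaction frees memstore space.
                    if (++attempts > 10) {
                        throw busy; // give up after a bounded number of retries
                    }
                    Thread.sleep(100L * attempts); // simple linear backoff
                }
            }
        }
    }
}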
2024-12-12T22:34:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-12T22:34:51,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-12T22:34:51,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-12T22:34:51,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7900 sec 2024-12-12T22:34:51,758 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.8180 sec 2024-12-12T22:34:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:51,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T22:34:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:51,997 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:51,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:51,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:52,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/a4dfab5a20f24970bf3ef63c9b6a03da is 50, key is test_row_0/A:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042952017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042952019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042952035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042952048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-12T22:34:52,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,052 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-12T22:34:52,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042952048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741866_1042 (size=12151) 2024-12-12T22:34:52,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/a4dfab5a20f24970bf3ef63c9b6a03da 2024-12-12T22:34:52,076 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:34:52,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-12T22:34:52,085 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:34:52,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T22:34:52,086 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:34:52,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:34:52,089 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/c17f4b56c1e94644af659a8c3e3f789c is 50, key is test_row_0/B:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741867_1043 (size=12151) 
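[Editor's note] The FLUSH operation recorded above (procId 14 completed, with pid=16 stored right after) is what a client-requested table flush looks like from the master's side. A minimal sketch of issuing one through the Admin API is shown below; the standalone class and fresh connection are illustrative assumptions, not part of the test harness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table (a FlushTableProcedure like
            // procId 14/16 above) and waits for the operation to finish.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}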
2024-12-12T22:34:52,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/c17f4b56c1e94644af659a8c3e3f789c 2024-12-12T22:34:52,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042952153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042952139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042952157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042952162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042952164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T22:34:52,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3cfcac590db74b74bf6e7445db516cc6 is 50, key is test_row_0/C:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741868_1044 (size=12151) 2024-12-12T22:34:52,241 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=163 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3cfcac590db74b74bf6e7445db516cc6 2024-12-12T22:34:52,257 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T22:34:52,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:52,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:52,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:52,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:52,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:52,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/a4dfab5a20f24970bf3ef63c9b6a03da as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da 2024-12-12T22:34:52,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da, entries=150, sequenceid=163, filesize=11.9 K 2024-12-12T22:34:52,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/c17f4b56c1e94644af659a8c3e3f789c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c 2024-12-12T22:34:52,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042952366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,370 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042952369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042952369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042952372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042952372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T22:34:52,423 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T22:34:52,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:52,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:52,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:52,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:52,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:52,433 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c, entries=150, sequenceid=163, filesize=11.9 K 2024-12-12T22:34:52,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/3cfcac590db74b74bf6e7445db516cc6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6 2024-12-12T22:34:52,489 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6, entries=150, sequenceid=163, filesize=11.9 K 2024-12-12T22:34:52,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 3292c08f1e2fe18d3fcbb52f186614f5 in 500ms, sequenceid=163, compaction requested=true 2024-12-12T22:34:52,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:52,498 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:52,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:34:52,499 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:52,506 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:52,506 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:52,506 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:52,506 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/da3221900558410582680aefbb694ab0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.6 K 2024-12-12T22:34:52,512 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting da3221900558410582680aefbb694ab0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:52,515 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:52,515 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:52,516 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:52,516 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.6 K 2024-12-12T22:34:52,523 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c3951aa3653498588c85b2dd4825fa6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734042890544 2024-12-12T22:34:52,523 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bfea1f44be5459987b24c0bcd862bcf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:52,527 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c17f4b56c1e94644af659a8c3e3f789c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:52,527 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 532d6dde08ed4fe38dd8841fe5c3d566, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734042890544 2024-12-12T22:34:52,539 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a4dfab5a20f24970bf3ef63c9b6a03da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:52,575 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:52,576 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/dbf35eb5e4064abeb53467ffcf4af768 is 50, key is test_row_0/B:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,587 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-12T22:34:52,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:52,592 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:52,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:52,617 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#31 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:52,618 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/9138ca1519c54e5783fa5cff136a7dc8 is 50, key is test_row_0/A:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741869_1045 (size=12493) 2024-12-12T22:34:52,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/c36e356386ba4dab819ba6579fe30312 is 50, key is test_row_0/A:col10/1734042892001/Put/seqid=0 2024-12-12T22:34:52,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741870_1046 (size=12493) 2024-12-12T22:34:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:52,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741871_1047 (size=12151) 2024-12-12T22:34:52,701 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/c36e356386ba4dab819ba6579fe30312 2024-12-12T22:34:52,704 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
as already flushing 2024-12-12T22:34:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T22:34:52,717 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/9138ca1519c54e5783fa5cff136a7dc8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9138ca1519c54e5783fa5cff136a7dc8 2024-12-12T22:34:52,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/d661468c1aca45fc968badfac293d136 is 50, key is test_row_0/B:col10/1734042892001/Put/seqid=0 2024-12-12T22:34:52,741 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 9138ca1519c54e5783fa5cff136a7dc8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:52,741 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:52,742 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=13, startTime=1734042892497; duration=0sec 2024-12-12T22:34:52,742 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:52,742 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:52,742 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:52,748 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:52,748 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:52,748 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:52,748 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/a7e19e2c254546aca5fd2a53448e4310, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.6 K 2024-12-12T22:34:52,749 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a7e19e2c254546aca5fd2a53448e4310, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042888403 2024-12-12T22:34:52,752 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7623c66d0a9c49b6aae1b07a949c5947, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1734042890544 2024-12-12T22:34:52,760 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cfcac590db74b74bf6e7445db516cc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:52,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741872_1048 (size=12151) 2024-12-12T22:34:52,806 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:52,807 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fdc46cebf5a24626a36a7173865baa65 is 50, key is test_row_0/C:col10/1734042891989/Put/seqid=0 2024-12-12T22:34:52,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042952825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042952821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042952829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042952830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042952832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741873_1049 (size=12493) 2024-12-12T22:34:52,880 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/fdc46cebf5a24626a36a7173865baa65 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fdc46cebf5a24626a36a7173865baa65 2024-12-12T22:34:52,900 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into fdc46cebf5a24626a36a7173865baa65(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:52,900 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:52,900 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=13, startTime=1734042892498; duration=0sec 2024-12-12T22:34:52,900 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:52,900 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:52,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042952936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042952936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042952946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042952946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:52,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:52,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042952951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,061 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/dbf35eb5e4064abeb53467ffcf4af768 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/dbf35eb5e4064abeb53467ffcf4af768 2024-12-12T22:34:53,084 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into dbf35eb5e4064abeb53467ffcf4af768(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:53,084 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:53,084 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=13, startTime=1734042892498; duration=0sec 2024-12-12T22:34:53,085 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:53,085 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:53,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042953160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042953160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042953161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042953161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042953164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,186 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/d661468c1aca45fc968badfac293d136 2024-12-12T22:34:53,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-12T22:34:53,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/83b9578de899427985d3e3e6f5a29738 is 50, key is test_row_0/C:col10/1734042892001/Put/seqid=0 2024-12-12T22:34:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741874_1050 (size=12151) 2024-12-12T22:34:53,274 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/83b9578de899427985d3e3e6f5a29738 2024-12-12T22:34:53,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/c36e356386ba4dab819ba6579fe30312 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312 2024-12-12T22:34:53,309 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T22:34:53,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/d661468c1aca45fc968badfac293d136 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136 2024-12-12T22:34:53,321 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T22:34:53,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/83b9578de899427985d3e3e6f5a29738 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738 2024-12-12T22:34:53,349 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738, entries=150, sequenceid=172, filesize=11.9 K 2024-12-12T22:34:53,352 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=174.43 KB/178620 for 3292c08f1e2fe18d3fcbb52f186614f5 in 761ms, sequenceid=172, compaction requested=false 2024-12-12T22:34:53,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:53,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-12T22:34:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-12T22:34:53,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-12T22:34:53,383 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2810 sec 2024-12-12T22:34:53,397 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.3110 sec 2024-12-12T22:34:53,482 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-12T22:34:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:53,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:53,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:53,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:53,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:53,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:53,496 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:53,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042953484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042953503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042953510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,516 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042953512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/318525f07e1e4a2d8929ac4a3c6c1533 is 50, key is test_row_0/A:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:53,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042953507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741875_1051 (size=12151) 2024-12-12T22:34:53,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/318525f07e1e4a2d8929ac4a3c6c1533 2024-12-12T22:34:53,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042953613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042953621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042953622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042953623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042953624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/75007aa659b440a7b520c38a10e879bf is 50, key is test_row_0/B:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:53,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741876_1052 (size=12151) 2024-12-12T22:34:53,695 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/75007aa659b440a7b520c38a10e879bf 2024-12-12T22:34:53,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/66f4d25570094a67aca51c77d9083777 is 50, key is test_row_0/C:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:53,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042953828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042953830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042953828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:53,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042953830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042953833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:53,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741877_1053 (size=12151) 2024-12-12T22:34:53,844 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/66f4d25570094a67aca51c77d9083777 2024-12-12T22:34:53,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/318525f07e1e4a2d8929ac4a3c6c1533 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533 2024-12-12T22:34:53,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T22:34:53,921 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/75007aa659b440a7b520c38a10e879bf as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf 2024-12-12T22:34:53,954 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T22:34:53,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/66f4d25570094a67aca51c77d9083777 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777 2024-12-12T22:34:53,996 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777, entries=150, sequenceid=207, filesize=11.9 K 2024-12-12T22:34:53,997 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=20.13 KB/20610 for 3292c08f1e2fe18d3fcbb52f186614f5 in 515ms, sequenceid=207, compaction requested=true 2024-12-12T22:34:53,998 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:53,998 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:53,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:34:53,998 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:53,999 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:53,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:54,012 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:54,012 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:54,012 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,013 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/dbf35eb5e4064abeb53467ffcf4af768, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.9 K 2024-12-12T22:34:54,016 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:54,016 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:54,016 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:54,016 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9138ca1519c54e5783fa5cff136a7dc8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.9 K 2024-12-12T22:34:54,017 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting dbf35eb5e4064abeb53467ffcf4af768, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:54,021 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9138ca1519c54e5783fa5cff136a7dc8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:54,021 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d661468c1aca45fc968badfac293d136, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734042892001 2024-12-12T22:34:54,027 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c36e356386ba4dab819ba6579fe30312, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734042892001 2024-12-12T22:34:54,027 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 75007aa659b440a7b520c38a10e879bf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:54,033 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 318525f07e1e4a2d8929ac4a3c6c1533, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:54,101 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#39 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:54,102 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/4afba25a712140bda079f83ff7af94c0 is 50, key is test_row_0/B:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:54,105 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#40 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:54,106 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/dba4dacb0c5145a68e8ea49a52ea30dd is 50, key is test_row_0/A:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:54,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741878_1054 (size=12595) 2024-12-12T22:34:54,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:54,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:34:54,173 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/4afba25a712140bda079f83ff7af94c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4afba25a712140bda079f83ff7af94c0 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:54,176 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,191 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into 4afba25a712140bda079f83ff7af94c0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:54,191 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:54,191 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=13, startTime=1734042893999; duration=0sec 2024-12-12T22:34:54,191 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:54,191 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:54,191 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:54,193 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:54,194 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:54,195 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,197 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fdc46cebf5a24626a36a7173865baa65, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=35.9 K 2024-12-12T22:34:54,198 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting fdc46cebf5a24626a36a7173865baa65, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=163, earliestPutTs=1734042890800 2024-12-12T22:34:54,199 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 83b9578de899427985d3e3e6f5a29738, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1734042892001 2024-12-12T22:34:54,200 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 66f4d25570094a67aca51c77d9083777, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:54,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=16 2024-12-12T22:34:54,212 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-12T22:34:54,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 is 50, key is test_row_0/A:col10/1734042894168/Put/seqid=0 2024-12-12T22:34:54,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741879_1055 (size=12595) 2024-12-12T22:34:54,219 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:34:54,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-12T22:34:54,231 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:34:54,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:54,232 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:34:54,233 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:34:54,241 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:54,242 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/ece9f1d40bfe42eb95e709be191e28a3 is 50, key is test_row_0/C:col10/1734042893481/Put/seqid=0 2024-12-12T22:34:54,258 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/dba4dacb0c5145a68e8ea49a52ea30dd as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/dba4dacb0c5145a68e8ea49a52ea30dd 2024-12-12T22:34:54,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042954248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042954251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042954254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042954268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,279 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into dba4dacb0c5145a68e8ea49a52ea30dd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:54,280 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:54,280 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=13, startTime=1734042893998; duration=0sec 2024-12-12T22:34:54,280 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:54,280 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:54,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042954278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741880_1056 (size=16931) 2024-12-12T22:34:54,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741881_1057 (size=12595) 2024-12-12T22:34:54,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 2024-12-12T22:34:54,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:54,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/3927a596aacb47329c8d95be388bac13 is 50, key is test_row_0/B:col10/1734042894168/Put/seqid=0 2024-12-12T22:34:54,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042954374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,386 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T22:34:54,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042954380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:54,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,391 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:54,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:54,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042954386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:54,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042954379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,404 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042954399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741882_1058 (size=12151) 2024-12-12T22:34:54,411 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/3927a596aacb47329c8d95be388bac13 2024-12-12T22:34:54,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e037b350c44c4587ac79557ecc22cdcb is 50, key is test_row_0/C:col10/1734042894168/Put/seqid=0 2024-12-12T22:34:54,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741883_1059 (size=12151) 2024-12-12T22:34:54,514 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e037b350c44c4587ac79557ecc22cdcb 2024-12-12T22:34:54,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:54,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T22:34:54,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:54,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:54,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:54,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:54,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:54,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 2024-12-12T22:34:54,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042954579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5, entries=250, sequenceid=219, filesize=16.5 K 2024-12-12T22:34:54,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/3927a596aacb47329c8d95be388bac13 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13 2024-12-12T22:34:54,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042954593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042954599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042954603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042954615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T22:34:54,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/e037b350c44c4587ac79557ecc22cdcb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb 2024-12-12T22:34:54,665 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T22:34:54,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3292c08f1e2fe18d3fcbb52f186614f5 in 501ms, sequenceid=219, compaction requested=false 2024-12-12T22:34:54,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:54,715 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-12T22:34:54,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:54,723 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:34:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:54,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:54,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:54,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/fdd23cbfa7de41e69f3becae5985247d is 50, key is test_row_0/A:col10/1734042894263/Put/seqid=0 2024-12-12T22:34:54,743 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/ece9f1d40bfe42eb95e709be191e28a3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ece9f1d40bfe42eb95e709be191e28a3 2024-12-12T22:34:54,769 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into ece9f1d40bfe42eb95e709be191e28a3(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:54,769 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:54,769 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=13, startTime=1734042893999; duration=0sec 2024-12-12T22:34:54,770 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:54,770 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:54,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741884_1060 (size=12151) 2024-12-12T22:34:54,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:54,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:54,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:54,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042954938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042954940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042954942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,966 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042954948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:54,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:54,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042954957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042955064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042955084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042955077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042955086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042955091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,213 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/fdd23cbfa7de41e69f3becae5985247d 2024-12-12T22:34:55,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/4c6cf8204974471b9de9140d65bc1363 is 50, key is test_row_0/B:col10/1734042894263/Put/seqid=0 2024-12-12T22:34:55,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042955278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042955289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042955308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042955308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042955308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741885_1061 (size=12151) 2024-12-12T22:34:55,335 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/4c6cf8204974471b9de9140d65bc1363 2024-12-12T22:34:55,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:55,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0dcfe8f206d84b5483bbf69e5f012ce0 is 50, key is test_row_0/C:col10/1734042894263/Put/seqid=0 2024-12-12T22:34:55,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741886_1062 (size=12151) 2024-12-12T22:34:55,492 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0dcfe8f206d84b5483bbf69e5f012ce0 2024-12-12T22:34:55,512 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/fdd23cbfa7de41e69f3becae5985247d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d 2024-12-12T22:34:55,527 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T22:34:55,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/4c6cf8204974471b9de9140d65bc1363 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363 2024-12-12T22:34:55,546 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T22:34:55,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0dcfe8f206d84b5483bbf69e5f012ce0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0 2024-12-12T22:34:55,592 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0, entries=150, sequenceid=245, filesize=11.9 K 2024-12-12T22:34:55,610 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3292c08f1e2fe18d3fcbb52f186614f5 in 887ms, sequenceid=245, compaction requested=true 2024-12-12T22:34:55,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:55,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:55,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-12T22:34:55,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-12T22:34:55,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:34:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:55,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:55,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:55,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:55,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-12T22:34:55,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3940 sec 2024-12-12T22:34:55,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.4180 sec 2024-12-12T22:34:55,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0da202561851456bbedc784efe6f7404 is 50, key is test_row_0/A:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:55,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741887_1063 (size=12251) 2024-12-12T22:34:55,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042955721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0da202561851456bbedc784efe6f7404 2024-12-12T22:34:55,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042955722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042955739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042955738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,763 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042955744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/8429f78553a042e7bdea57f1aef9acc6 is 50, key is test_row_0/B:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:55,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741888_1064 (size=12251) 2024-12-12T22:34:55,847 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/8429f78553a042e7bdea57f1aef9acc6 2024-12-12T22:34:55,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042955871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042955871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042955873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042955874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:55,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042955883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:55,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0e0a90e658544e6396f98febd7671a07 is 50, key is test_row_0/C:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:55,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741889_1065 (size=12251) 2024-12-12T22:34:55,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0e0a90e658544e6396f98febd7671a07 2024-12-12T22:34:56,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0da202561851456bbedc784efe6f7404 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404 2024-12-12T22:34:56,053 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T22:34:56,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/8429f78553a042e7bdea57f1aef9acc6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6 2024-12-12T22:34:56,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042956079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042956080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042956087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042956088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T22:34:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042956100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/0e0a90e658544e6396f98febd7671a07 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07 2024-12-12T22:34:56,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07, entries=150, sequenceid=260, filesize=12.0 K 2024-12-12T22:34:56,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 3292c08f1e2fe18d3fcbb52f186614f5 in 528ms, sequenceid=260, compaction requested=true 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:56,152 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:56,152 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 
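The RegionTooBusyException warnings above are memstore backpressure: HRegion.checkResources() rejects new writes once the region's memstore passes its blocking limit (512.0 K in this run), and callers are expected to back off until the flush shown above drains it. The stock HBase client normally retries this internally, so the explicit loop below is only an illustrative sketch; the table, row, and column values are placeholders rather than data taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        // Retry a single put with exponential backoff while the region reports
        // memstore backpressure. Row/family/qualifier are placeholders.
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 50;
                for (int attempt = 0; attempt < 10; attempt++) {
                    try {
                        table.put(put);
                        return;                       // write accepted
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);      // let MemStoreFlusher catch up
                        backoffMs = Math.min(backoffMs * 2, 5000);
                    }
                }
                throw new IllegalStateException("region stayed too busy after retries");
            }
        }
    }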
2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:56,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:56,161 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53928 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:56,162 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:56,162 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:56,162 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/dba4dacb0c5145a68e8ea49a52ea30dd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=52.7 K 2024-12-12T22:34:56,162 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49148 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:56,162 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:56,162 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
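The "Exploring compaction algorithm has selected 4 files" entries come from the size-ratio test used for minor-compaction selection: each file in the candidate window must be no larger than the combined size of the other candidates multiplied by the compaction ratio. Below is a minimal standalone sketch of that check, not the HBase implementation itself; the ratio of 1.2 and the approximate byte sizes of the four A-store files are assumptions for illustration.

    import java.util.List;

    public class RatioSelectionSketch {
        // Simplified version of the size-ratio test behind
        // "Exploring compaction algorithm has selected N files":
        // every candidate must satisfy size <= (sum of the others) * ratio.
        static boolean filesInRatio(List<Long> sizes, double ratio) {
            long total = sizes.stream().mapToLong(Long::longValue).sum();
            for (long size : sizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Approximately the 12.3 K / 16.5 K / 11.9 K / 12.0 K files above, in bytes.
            List<Long> storeFiles = List.of(12_595L, 16_896L, 12_186L, 12_251L);
            System.out.println("in ratio: " + filesInRatio(storeFiles, 1.2));
        }
    }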
2024-12-12T22:34:56,163 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4afba25a712140bda079f83ff7af94c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=48.0 K 2024-12-12T22:34:56,163 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 4afba25a712140bda079f83ff7af94c0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:56,164 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting dba4dacb0c5145a68e8ea49a52ea30dd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:56,164 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3927a596aacb47329c8d95be388bac13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734042893499 2024-12-12T22:34:56,164 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 96e250d25e3f4d8aa0f5d96a5b15b8d5, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734042893499 2024-12-12T22:34:56,164 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c6cf8204974471b9de9140d65bc1363, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734042894237 2024-12-12T22:34:56,165 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdd23cbfa7de41e69f3becae5985247d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734042894237 2024-12-12T22:34:56,165 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0da202561851456bbedc784efe6f7404, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:56,165 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 8429f78553a042e7bdea57f1aef9acc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:56,225 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:56,226 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/cfd1d6128fba4af09fee7847e13d18c0 is 50, key is test_row_0/B:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:56,245 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:56,246 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0dd19ec2deab493b964a35db46fb3d15 is 50, key is test_row_0/A:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:56,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741891_1067 (size=12831) 2024-12-12T22:34:56,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741890_1066 (size=12831) 2024-12-12T22:34:56,350 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0dd19ec2deab493b964a35db46fb3d15 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0dd19ec2deab493b964a35db46fb3d15 2024-12-12T22:34:56,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-12T22:34:56,357 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-12T22:34:56,361 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/cfd1d6128fba4af09fee7847e13d18c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/cfd1d6128fba4af09fee7847e13d18c0 2024-12-12T22:34:56,372 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 0dd19ec2deab493b964a35db46fb3d15(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
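The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed" entry is the client side of the flush procedure finishing: the admin call polls the master (the "Checking to see if procedure is done" lines) until the FlushTableProcedure reports success. A minimal sketch of issuing that flush from client code, assuming only the standard Admin API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        // Ask the master to flush all regions of the table; on the master this
        // shows up as a FlushTableProcedure with one FlushRegionProcedure
        // subprocedure per region, as in the PEWorker entries above.
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }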
2024-12-12T22:34:56,372 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:56,372 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=12, startTime=1734042896152; duration=0sec 2024-12-12T22:34:56,372 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:56,373 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:56,373 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:34:56,374 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:34:56,376 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into cfd1d6128fba4af09fee7847e13d18c0(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:56,376 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:56,376 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=12, startTime=1734042896152; duration=0sec 2024-12-12T22:34:56,376 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:56,376 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:56,377 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49148 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:34:56,377 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:56,377 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
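The C-store compaction starting here was queued automatically by the flusher ("Small Compaction requested: system"), but the same work can also be requested explicitly from a client. A short sketch, again assuming only the public Admin API; whether the request runs as a minor or major compaction is decided server-side.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
        // Queue a compaction for one column family of the test table; compact()
        // requests a (possibly minor) compaction, majorCompact() rewrites all
        // files in each store.
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
                // or: admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }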
2024-12-12T22:34:56,377 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ece9f1d40bfe42eb95e709be191e28a3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=48.0 K 2024-12-12T22:34:56,378 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting ece9f1d40bfe42eb95e709be191e28a3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042893472 2024-12-12T22:34:56,379 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e037b350c44c4587ac79557ecc22cdcb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734042893499 2024-12-12T22:34:56,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-12T22:34:56,380 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dcfe8f206d84b5483bbf69e5f012ce0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1734042894237 2024-12-12T22:34:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:56,386 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e0a90e658544e6396f98febd7671a07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:56,388 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:34:56,391 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:34:56,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:34:56,407 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#53 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:56,408 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/9c91a98b837f47298e085aab3d5680ef is 50, key is test_row_0/C:col10/1734042895609/Put/seqid=0 2024-12-12T22:34:56,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:34:56,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:56,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:56,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:56,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:56,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:56,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:56,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:56,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042956426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042956429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042956431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042956431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/639e7da10c394a068b0e840602ec1d78 is 50, key is test_row_0/A:col10/1734042896410/Put/seqid=0 2024-12-12T22:34:56,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042956453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741892_1068 (size=12831) 2024-12-12T22:34:56,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:56,508 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/9c91a98b837f47298e085aab3d5680ef as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/9c91a98b837f47298e085aab3d5680ef 2024-12-12T22:34:56,525 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into 9c91a98b837f47298e085aab3d5680ef(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:56,525 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:56,525 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=12, startTime=1734042896152; duration=0sec 2024-12-12T22:34:56,526 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:56,526 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:56,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741893_1069 (size=12301) 2024-12-12T22:34:56,536 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/639e7da10c394a068b0e840602ec1d78 2024-12-12T22:34:56,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042956536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042956539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042956539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042956540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,555 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:56,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:56,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:56,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:56,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042956565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,584 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 is 50, key is test_row_0/B:col10/1734042896410/Put/seqid=0 2024-12-12T22:34:56,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741894_1070 (size=12301) 2024-12-12T22:34:56,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 2024-12-12T22:34:56,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:56,719 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:56,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:56,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:56,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:56,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/719c44abf2224b10838c7b0c780d5296 is 50, key is test_row_0/C:col10/1734042896410/Put/seqid=0 2024-12-12T22:34:56,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042956744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741895_1071 (size=12301) 2024-12-12T22:34:56,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042956746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042956756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042956757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,764 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/719c44abf2224b10838c7b0c780d5296 2024-12-12T22:34:56,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:56,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042956777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/639e7da10c394a068b0e840602ec1d78 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78 2024-12-12T22:34:56,862 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T22:34:56,882 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:56,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 2024-12-12T22:34:56,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:56,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:56,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:56,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T22:34:56,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/719c44abf2224b10838c7b0c780d5296 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296 2024-12-12T22:34:56,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T22:34:56,953 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3292c08f1e2fe18d3fcbb52f186614f5 in 540ms, sequenceid=289, compaction requested=false 2024-12-12T22:34:56,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:56,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:57,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:34:57,050 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:57,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:57,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:57,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:57,063 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:57,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,065 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:57,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/aa06c34e773c455b989d1fa713017778 is 50, key is test_row_0/A:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741896_1072 (size=14741) 2024-12-12T22:34:57,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/aa06c34e773c455b989d1fa713017778 2024-12-12T22:34:57,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042957148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042957147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042957153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042957153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042957159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e7c939b3a577423d910f1555e00f52f2 is 50, key is test_row_0/B:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,219 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:57,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741897_1073 (size=12301) 2024-12-12T22:34:57,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e7c939b3a577423d910f1555e00f52f2 2024-12-12T22:34:57,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042957271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042957271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042957272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/729af903505544c5b46401c3819297b1 is 50, key is test_row_0/C:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042957282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042957292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741898_1074 (size=12301) 2024-12-12T22:34:57,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=301 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/729af903505544c5b46401c3819297b1 2024-12-12T22:34:57,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/aa06c34e773c455b989d1fa713017778 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778 2024-12-12T22:34:57,382 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,383 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:57,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:57,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:57,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:57,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778, entries=200, sequenceid=301, filesize=14.4 K 2024-12-12T22:34:57,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/e7c939b3a577423d910f1555e00f52f2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2 2024-12-12T22:34:57,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2, entries=150, sequenceid=301, filesize=12.0 K 2024-12-12T22:34:57,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/729af903505544c5b46401c3819297b1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1 2024-12-12T22:34:57,432 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1, entries=150, sequenceid=301, filesize=12.0 K 2024-12-12T22:34:57,436 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3292c08f1e2fe18d3fcbb52f186614f5 in 385ms, sequenceid=301, compaction requested=true 2024-12-12T22:34:57,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:57,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:34:57,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:57,436 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:57,436 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:57,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:57,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:57,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:57,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:57,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39873 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:57,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:57,440 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,440 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0dd19ec2deab493b964a35db46fb3d15, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=38.9 K 2024-12-12T22:34:57,441 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:57,441 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:57,442 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:57,442 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/cfd1d6128fba4af09fee7847e13d18c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=36.6 K 2024-12-12T22:34:57,442 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dd19ec2deab493b964a35db46fb3d15, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:57,443 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting cfd1d6128fba4af09fee7847e13d18c0, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:57,443 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 639e7da10c394a068b0e840602ec1d78, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734042895733 2024-12-12T22:34:57,446 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 42f9a6fe73c04ce5bcb88bcc079c4eb3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734042895733 2024-12-12T22:34:57,446 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting aa06c34e773c455b989d1fa713017778, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:57,447 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e7c939b3a577423d910f1555e00f52f2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:57,467 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#60 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:57,468 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/17f10d9c36b141b487be611e4b6b1426 is 50, key is test_row_0/A:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,470 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#61 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:57,470 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/5293710479ab4bd091134549807f1a81 is 50, key is test_row_0/B:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:57,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:34:57,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:57,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:57,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741899_1075 (size=12983) 2024-12-12T22:34:57,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741900_1076 (size=12983) 2024-12-12T22:34:57,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042957518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,531 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042957518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,533 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/17f10d9c36b141b487be611e4b6b1426 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/17f10d9c36b141b487be611e4b6b1426 2024-12-12T22:34:57,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042957525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042957528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,542 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,543 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
as already flushing 2024-12-12T22:34:57,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,543 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042957537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,552 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 17f10d9c36b141b487be611e4b6b1426(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:57,552 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:57,552 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=13, startTime=1734042897436; duration=0sec 2024-12-12T22:34:57,552 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:57,552 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:57,552 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:57,553 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:57,553 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:57,554 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,554 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/9c91a98b837f47298e085aab3d5680ef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=36.6 K 2024-12-12T22:34:57,554 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c91a98b837f47298e085aab3d5680ef, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734042894939 2024-12-12T22:34:57,555 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 719c44abf2224b10838c7b0c780d5296, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734042895733 2024-12-12T22:34:57,556 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 729af903505544c5b46401c3819297b1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:57,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 is 50, key is test_row_0/A:col10/1734042897487/Put/seqid=0 2024-12-12T22:34:57,582 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#63 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:57,583 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/f94a23ff520c43158e5c5a8892eaf75d is 50, key is test_row_0/C:col10/1734042896424/Put/seqid=0 2024-12-12T22:34:57,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741901_1077 (size=12301) 2024-12-12T22:34:57,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 2024-12-12T22:34:57,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741902_1078 (size=12983) 2024-12-12T22:34:57,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/74458a0cedfa4617a58594de8d9876d7 is 50, key is test_row_0/B:col10/1734042897487/Put/seqid=0 2024-12-12T22:34:57,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042957637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042957639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042957639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,649 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/f94a23ff520c43158e5c5a8892eaf75d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/f94a23ff520c43158e5c5a8892eaf75d 2024-12-12T22:34:57,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042957652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,671 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into f94a23ff520c43158e5c5a8892eaf75d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:57,671 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:57,671 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=13, startTime=1734042897437; duration=0sec 2024-12-12T22:34:57,671 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:57,671 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:34:57,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741903_1079 (size=12301) 2024-12-12T22:34:57,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/74458a0cedfa4617a58594de8d9876d7 2024-12-12T22:34:57,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:57,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/61d6612002e8435682c29045d9416b57 is 50, key is test_row_0/C:col10/1734042897487/Put/seqid=0 2024-12-12T22:34:57,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741904_1080 (size=12301) 2024-12-12T22:34:57,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042957840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,852 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042957849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042957849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042957849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:57,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042957861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:57,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:57,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:57,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:57,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:57,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:57,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:57,968 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/5293710479ab4bd091134549807f1a81 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/5293710479ab4bd091134549807f1a81 2024-12-12T22:34:58,013 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into 5293710479ab4bd091134549807f1a81(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
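The blocking limit reported throughout this stretch as "Over memstore limit=512.0 K" is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; once the region's memstore exceeds that product, HRegion.checkResources rejects writes until flushes catch up. The values this test actually uses are not visible in the log, so the snippet below is only a sketch of one combination (128 KB x 4) that would yield the 512 KB figure.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB (the default is far larger; a small
        // value like this is the kind of setting a stress test would use to force frequent flushes).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new writes once the memstore grows to 4x the flush size, i.e. 512 KB here,
        // which is when RegionTooBusyException ("Over memstore limit=512.0 K") is thrown.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }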
2024-12-12T22:34:58,013 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:58,013 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=13, startTime=1734042897436; duration=0sec 2024-12-12T22:34:58,014 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:58,014 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:58,033 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042958155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042958158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042958164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042958167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/61d6612002e8435682c29045d9416b57 2024-12-12T22:34:58,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 2024-12-12T22:34:58,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T22:34:58,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/74458a0cedfa4617a58594de8d9876d7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7 2024-12-12T22:34:58,227 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region 
operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T22:34:58,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/61d6612002e8435682c29045d9416b57 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57 2024-12-12T22:34:58,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57, entries=150, sequenceid=328, filesize=12.0 K 2024-12-12T22:34:58,267 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3292c08f1e2fe18d3fcbb52f186614f5 in 778ms, sequenceid=328, compaction requested=false 2024-12-12T22:34:58,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:58,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:58,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:34:58,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:58,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:58,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:58,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:58,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:58,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:58,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6b60850da8734fa8b5f0487dc5db8666 is 50, key is test_row_0/A:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:58,386 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,388 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741905_1081 (size=12301) 2024-12-12T22:34:58,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6b60850da8734fa8b5f0487dc5db8666 2024-12-12T22:34:58,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/eec6d1f9a2e84d5697c888821ba61009 is 50, key is test_row_0/B:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:58,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:34:58,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741906_1082 (size=12301) 2024-12-12T22:34:58,544 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,545 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,570 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042958570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042958672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042958671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042958673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042958673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042958677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,858 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:58,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:58,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:58,860 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:58,884 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:58,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042958881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:58,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/eec6d1f9a2e84d5697c888821ba61009 2024-12-12T22:34:58,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/d48d9be9049344258ac19c6efc462e20 is 50, key is test_row_0/C:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:58,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741907_1083 (size=12301) 2024-12-12T22:34:58,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/d48d9be9049344258ac19c6efc462e20 2024-12-12T22:34:58,970 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6b60850da8734fa8b5f0487dc5db8666 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666 2024-12-12T22:34:58,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666, entries=150, sequenceid=342, filesize=12.0 K 2024-12-12T22:34:58,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/eec6d1f9a2e84d5697c888821ba61009 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009 2024-12-12T22:34:59,027 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,028 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009, entries=150, sequenceid=342, filesize=12.0 K 2024-12-12T22:34:59,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/d48d9be9049344258ac19c6efc462e20 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20 2024-12-12T22:34:59,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20, entries=150, sequenceid=342, filesize=12.0 K 2024-12-12T22:34:59,064 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3292c08f1e2fe18d3fcbb52f186614f5 in 707ms, sequenceid=342, compaction requested=true 2024-12-12T22:34:59,064 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:59,065 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:34:59,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:34:59,065 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:59,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:59,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files) 2024-12-12T22:34:59,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in 
TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/5293710479ab4bd091134549807f1a81, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=36.7 K 2024-12-12T22:34:59,073 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:59,073 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:34:59,074 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,074 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/17f10d9c36b141b487be611e4b6b1426, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=36.7 K 2024-12-12T22:34:59,075 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 17f10d9c36b141b487be611e4b6b1426, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:59,075 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5293710479ab4bd091134549807f1a81, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:59,078 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74458a0cedfa4617a58594de8d9876d7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734042897137 2024-12-12T22:34:59,078 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ff34c9e6e2b146cb9cb45f2a88a95aa6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734042897137 2024-12-12T22:34:59,079 
DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b60850da8734fa8b5f0487dc5db8666, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:34:59,079 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting eec6d1f9a2e84d5697c888821ba61009, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:34:59,145 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:59,146 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0a7618afa3f94abf979628785ab81e30 is 50, key is test_row_0/B:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:59,177 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:59,178 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0262c1875d3648acb92dc3160d498af2 is 50, key is test_row_0/A:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:59,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,197 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:34:59,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:34:59,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:59,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:34:59,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:59,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:34:59,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:34:59,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:34:59,198 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741908_1084 (size=13085) 2024-12-12T22:34:59,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/f1bf88b097ee4e39a57218cfcdb69dbe is 50, key is test_row_0/A:col10/1734042898534/Put/seqid=0 2024-12-12T22:34:59,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741909_1085 (size=13085) 2024-12-12T22:34:59,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741910_1086 (size=12301) 2024-12-12T22:34:59,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042959281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,358 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:59,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042959394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,513 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042959600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,630 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0a7618afa3f94abf979628785ab81e30 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0a7618afa3f94abf979628785ab81e30 2024-12-12T22:34:59,640 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into 0a7618afa3f94abf979628785ab81e30(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:34:59,640 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:59,640 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=13, startTime=1734042899065; duration=0sec 2024-12-12T22:34:59,642 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:34:59,642 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:34:59,642 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:34:59,643 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:34:59,643 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:34:59,644 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,644 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/f94a23ff520c43158e5c5a8892eaf75d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=36.7 K 2024-12-12T22:34:59,645 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f94a23ff520c43158e5c5a8892eaf75d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=301, earliestPutTs=1734042896424 2024-12-12T22:34:59,646 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61d6612002e8435682c29045d9416b57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734042897137 2024-12-12T22:34:59,647 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d48d9be9049344258ac19c6efc462e20, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:34:59,669 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,670 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/0262c1875d3648acb92dc3160d498af2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0262c1875d3648acb92dc3160d498af2 2024-12-12T22:34:59,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,680 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 3292c08f1e2fe18d3fcbb52f186614f5 into 0262c1875d3648acb92dc3160d498af2(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:34:59,680 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:34:59,680 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=13, startTime=1734042899065; duration=0sec 2024-12-12T22:34:59,680 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:34:59,680 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:34:59,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/f1bf88b097ee4e39a57218cfcdb69dbe 2024-12-12T22:34:59,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042959682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042959684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042959684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,695 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:34:59,696 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/5427824d54724e59b8b0894cae84d742 is 50, key is test_row_0/C:col10/1734042897520/Put/seqid=0 2024-12-12T22:34:59,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0f0e3400652f493aa7a50220e0fb81cc is 50, key is test_row_0/B:col10/1734042898534/Put/seqid=0 2024-12-12T22:34:59,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042959693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741912_1088 (size=13085) 2024-12-12T22:34:59,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741911_1087 (size=12301) 2024-12-12T22:34:59,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0f0e3400652f493aa7a50220e0fb81cc 2024-12-12T22:34:59,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7d120577695e420abb459b2b6cff0256 is 50, key is test_row_0/C:col10/1734042898534/Put/seqid=0 2024-12-12T22:34:59,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741913_1089 (size=12301) 2024-12-12T22:34:59,824 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:34:59,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:34:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042959907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:34:59,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:34:59,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:34:59,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:34:59,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:34:59,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:34:59,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:00,145 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:00,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:35:00,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:00,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:35:00,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:00,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:00,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:00,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:00,161 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/5427824d54724e59b8b0894cae84d742 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/5427824d54724e59b8b0894cae84d742 2024-12-12T22:35:00,185 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into 5427824d54724e59b8b0894cae84d742(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:00,185 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:00,185 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=13, startTime=1734042899065; duration=0sec 2024-12-12T22:35:00,185 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:00,185 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C 2024-12-12T22:35:00,208 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7d120577695e420abb459b2b6cff0256 2024-12-12T22:35:00,233 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/f1bf88b097ee4e39a57218cfcdb69dbe as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe 2024-12-12T22:35:00,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T22:35:00,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0f0e3400652f493aa7a50220e0fb81cc as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc 2024-12-12T22:35:00,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T22:35:00,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/7d120577695e420abb459b2b6cff0256 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256 2024-12-12T22:35:00,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256, entries=150, sequenceid=368, filesize=12.0 K 2024-12-12T22:35:00,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1085ms, sequenceid=368, compaction requested=false 2024-12-12T22:35:00,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:00,303 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:00,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-12T22:35:00,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:35:00,305 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:35:00,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:00,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/613aec3420af4c86a913eb0a3fcade7a is 50, key is test_row_0/A:col10/1734042899220/Put/seqid=0 2024-12-12T22:35:00,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741914_1090 (size=12301) 2024-12-12T22:35:00,418 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. as already flushing 2024-12-12T22:35:00,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:00,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-12T22:35:00,573 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:00,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042960567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:00,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:00,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042960684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:00,759 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/613aec3420af4c86a913eb0a3fcade7a 2024-12-12T22:35:00,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0e19193f8ebd43d0abd328053d0c1e2b is 50, key is test_row_0/B:col10/1734042899220/Put/seqid=0 2024-12-12T22:35:00,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741915_1091 (size=12301) 2024-12-12T22:35:00,800 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0e19193f8ebd43d0abd328053d0c1e2b 2024-12-12T22:35:00,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/35d062d333b840278a9cf32eb6aac34a is 50, key is test_row_0/C:col10/1734042899220/Put/seqid=0 2024-12-12T22:35:00,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741916_1092 (size=12301) 2024-12-12T22:35:00,887 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/35d062d333b840278a9cf32eb6aac34a 2024-12-12T22:35:00,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region 
is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:00,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042960889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:00,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/613aec3420af4c86a913eb0a3fcade7a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a 2024-12-12T22:35:00,924 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T22:35:00,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/0e19193f8ebd43d0abd328053d0c1e2b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b 2024-12-12T22:35:00,961 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T22:35:00,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/35d062d333b840278a9cf32eb6aac34a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a 2024-12-12T22:35:00,981 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a, entries=150, sequenceid=381, filesize=12.0 K 2024-12-12T22:35:00,983 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 3292c08f1e2fe18d3fcbb52f186614f5 in 678ms, sequenceid=381, compaction requested=true 2024-12-12T22:35:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:00,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:35:00,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-12T22:35:00,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-12T22:35:00,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-12T22:35:00,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.6000 sec 2024-12-12T22:35:01,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 4.6240 sec 2024-12-12T22:35:01,205 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:35:01,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:35:01,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:01,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:35:01,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:01,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:35:01,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:01,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:01,228 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T22:35:01,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/b360f0e4071d4e049b798f10cfd736ff is 50, key is test_row_0/A:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:01,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042961270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741917_1093 (size=12301) 2024-12-12T22:35:01,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042961377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042961588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/b360f0e4071d4e049b798f10cfd736ff 2024-12-12T22:35:01,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49364 deadline: 1734042961705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,710 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:35:01,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49380 deadline: 1734042961707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,712 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4188 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:35:01,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49336 deadline: 1734042961707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,714 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4201 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:35:01,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ca66a597705843e5bb8db2ccff25195b is 50, key is test_row_0/B:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:01,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49342 deadline: 1734042961736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,745 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:35:01,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741918_1094 (size=12301) 2024-12-12T22:35:01,766 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ca66a597705843e5bb8db2ccff25195b 2024-12-12T22:35:01,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/bcc824e5d3c942b6ae4db85a6679e0fb is 50, key is test_row_0/C:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:01,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741919_1095 (size=12301) 2024-12-12T22:35:01,824 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/bcc824e5d3c942b6ae4db85a6679e0fb 2024-12-12T22:35:01,865 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:50645 2024-12-12T22:35:01,865 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:01,871 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10c964e8 to 127.0.0.1:50645 2024-12-12T22:35:01,871 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:01,873 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x527c6d40 to 127.0.0.1:50645 2024-12-12T22:35:01,874 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:01,881 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:50645 2024-12-12T22:35:01,881 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:01,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/b360f0e4071d4e049b798f10cfd736ff as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff 2024-12-12T22:35:01,907 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49374 deadline: 1734042961904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:01,920 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T22:35:01,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ca66a597705843e5bb8db2ccff25195b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b 2024-12-12T22:35:01,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b, entries=150, sequenceid=408, filesize=12.0 K 2024-12-12T22:35:01,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/bcc824e5d3c942b6ae4db85a6679e0fb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb 2024-12-12T22:35:02,020 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb, entries=150, sequenceid=408, filesize=12.0 K
2024-12-12T22:35:02,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 3292c08f1e2fe18d3fcbb52f186614f5 in 818ms, sequenceid=408, compaction requested=true
2024-12-12T22:35:02,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5:
2024-12-12T22:35:02,024 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-12T22:35:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:A, priority=-2147483648, current under compaction store size is 1
2024-12-12T22:35:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T22:35:02,024 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-12-12T22:35:02,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:B, priority=-2147483648, current under compaction store size is 2
2024-12-12T22:35:02,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T22:35:02,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3292c08f1e2fe18d3fcbb52f186614f5:C, priority=-2147483648, current under compaction store size is 3
2024-12-12T22:35:02,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T22:35:02,033 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-12-12T22:35:02,033 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/B is initiating minor compaction (all files)
2024-12-12T22:35:02,033 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/B in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.
2024-12-12T22:35:02,034 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0a7618afa3f94abf979628785ab81e30, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=48.8 K 2024-12-12T22:35:02,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a7618afa3f94abf979628785ab81e30, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:35:02,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f0e3400652f493aa7a50220e0fb81cc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734042898534 2024-12-12T22:35:02,042 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:02,042 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/A is initiating minor compaction (all files) 2024-12-12T22:35:02,042 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/A in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
2024-12-12T22:35:02,042 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0262c1875d3648acb92dc3160d498af2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=48.8 K 2024-12-12T22:35:02,043 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e19193f8ebd43d0abd328053d0c1e2b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734042899220 2024-12-12T22:35:02,048 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0262c1875d3648acb92dc3160d498af2, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:35:02,049 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ca66a597705843e5bb8db2ccff25195b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734042900544 2024-12-12T22:35:02,050 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1bf88b097ee4e39a57218cfcdb69dbe, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734042898534 2024-12-12T22:35:02,052 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 613aec3420af4c86a913eb0a3fcade7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734042899220 2024-12-12T22:35:02,053 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b360f0e4071d4e049b798f10cfd736ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734042900544 2024-12-12T22:35:02,102 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:02,103 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/af37dcaa3a9e495d868944d66dae0006 is 50, key is test_row_0/B:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:02,106 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:02,110 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6bc0b2e269764acf98c27080d560ce63 is 50, key is test_row_0/A:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:02,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741920_1096 (size=13221) 2024-12-12T22:35:02,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741921_1097 (size=13221) 2024-12-12T22:35:02,417 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:50645 2024-12-12T22:35:02,418 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:02,582 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/af37dcaa3a9e495d868944d66dae0006 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/af37dcaa3a9e495d868944d66dae0006 2024-12-12T22:35:02,615 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/6bc0b2e269764acf98c27080d560ce63 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6bc0b2e269764acf98c27080d560ce63 2024-12-12T22:35:02,620 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/B of 3292c08f1e2fe18d3fcbb52f186614f5 into af37dcaa3a9e495d868944d66dae0006(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:02,620 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:02,620 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/B, priority=12, startTime=1734042902024; duration=0sec 2024-12-12T22:35:02,620 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:02,620 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:B 2024-12-12T22:35:02,620 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:02,639 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49988 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:02,640 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3292c08f1e2fe18d3fcbb52f186614f5/C is initiating minor compaction (all files) 2024-12-12T22:35:02,640 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3292c08f1e2fe18d3fcbb52f186614f5/C in TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:02,640 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/5427824d54724e59b8b0894cae84d742, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp, totalSize=48.8 K 2024-12-12T22:35:02,643 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5427824d54724e59b8b0894cae84d742, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1734042897510 2024-12-12T22:35:02,645 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d120577695e420abb459b2b6cff0256, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734042898534 2024-12-12T22:35:02,645 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/A of 
3292c08f1e2fe18d3fcbb52f186614f5 into 6bc0b2e269764acf98c27080d560ce63(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:02,645 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:02,645 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/A, priority=12, startTime=1734042902024; duration=0sec 2024-12-12T22:35:02,646 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:02,646 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:A 2024-12-12T22:35:02,649 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 35d062d333b840278a9cf32eb6aac34a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734042899220 2024-12-12T22:35:02,651 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bcc824e5d3c942b6ae4db85a6679e0fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1734042900544 2024-12-12T22:35:02,676 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3292c08f1e2fe18d3fcbb52f186614f5#C#compaction#83 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:02,677 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/4f4e6052c6054a41aa01bb45b5401245 is 50, key is test_row_0/C:col10/1734042900544/Put/seqid=0 2024-12-12T22:35:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741922_1098 (size=13221) 2024-12-12T22:35:02,713 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/4f4e6052c6054a41aa01bb45b5401245 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/4f4e6052c6054a41aa01bb45b5401245 2024-12-12T22:35:02,725 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3292c08f1e2fe18d3fcbb52f186614f5/C of 3292c08f1e2fe18d3fcbb52f186614f5 into 4f4e6052c6054a41aa01bb45b5401245(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:02,725 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3292c08f1e2fe18d3fcbb52f186614f5:
2024-12-12T22:35:02,725 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5., storeName=3292c08f1e2fe18d3fcbb52f186614f5/C, priority=12, startTime=1734042902027; duration=0sec
2024-12-12T22:35:02,726 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T22:35:02,726 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3292c08f1e2fe18d3fcbb52f186614f5:C
2024-12-12T22:35:04,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20
2024-12-12T22:35:04,504 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed
2024-12-12T22:35:05,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3292c08f1e2fe18d3fcbb52f186614f5
2024-12-12T22:35:05,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A
2024-12-12T22:35:05,729 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c43377 to 127.0.0.1:50645
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B
2024-12-12T22:35:05,729 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C
2024-12-12T22:35:05,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:35:05,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/02de7685b75847cc9bea238cba2c2891 is 50, key is test_row_0/A:col10/1734042901266/Put/seqid=0
2024-12-12T22:35:05,757 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a8f4734 to 127.0.0.1:50645
2024-12-12T22:35:05,757 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5095ba91 to 127.0.0.1:50645
2024-12-12T22:35:05,757 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:35:05,757 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:35:05,771 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:50645
2024-12-12T22:35:05,771 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 113
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 60
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 54
2024-12-12T22:35:05,772 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 1935
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 1834
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 894
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 2682 rows
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 896
2024-12-12T22:35:05,773 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 2688 rows
2024-12-12T22:35:05,773 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-12T22:35:05,773 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e67f019 to 127.0.0.1:50645
2024-12-12T22:35:05,773 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:35:05,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741923_1099 (size=12301)
2024-12-12T22:35:05,783 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-12T22:35:05,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/02de7685b75847cc9bea238cba2c2891
2024-12-12T22:35:05,800 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-12T22:35:05,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-12T22:35:05,820 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042905819"}]},"ts":"1734042905819"}
2024-12-12T22:35:05,823 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-12T22:35:05,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-12-12T22:35:05,839 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:35:05,842 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:35:05,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/21a25ca008934dd9808b0710ada1453c is 50, key is test_row_0/B:col10/1734042901266/Put/seqid=0 2024-12-12T22:35:05,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, UNASSIGN}] 2024-12-12T22:35:05,852 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=24, ppid=23, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, UNASSIGN 2024-12-12T22:35:05,853 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=24 updating hbase:meta row=3292c08f1e2fe18d3fcbb52f186614f5, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:05,855 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:35:05,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; CloseRegionProcedure 3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:05,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741924_1100 (size=12301) 2024-12-12T22:35:05,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/21a25ca008934dd9808b0710ada1453c 2024-12-12T22:35:05,902 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/24de865a68f341469e27e96621ce6488 is 50, key is test_row_0/C:col10/1734042901266/Put/seqid=0 2024-12-12T22:35:05,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T22:35:05,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741925_1101 (size=12301) 2024-12-12T22:35:05,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/24de865a68f341469e27e96621ce6488 2024-12-12T22:35:05,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/02de7685b75847cc9bea238cba2c2891 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/02de7685b75847cc9bea238cba2c2891 2024-12-12T22:35:05,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/02de7685b75847cc9bea238cba2c2891, entries=150, sequenceid=422, filesize=12.0 K 2024-12-12T22:35:06,000 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/21a25ca008934dd9808b0710ada1453c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/21a25ca008934dd9808b0710ada1453c 2024-12-12T22:35:06,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/21a25ca008934dd9808b0710ada1453c, entries=150, sequenceid=422, filesize=12.0 K 2024-12-12T22:35:06,017 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:06,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/24de865a68f341469e27e96621ce6488 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/24de865a68f341469e27e96621ce6488 2024-12-12T22:35:06,024 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(124): Close 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:06,024 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:35:06,026 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1681): Closing 3292c08f1e2fe18d3fcbb52f186614f5, disabling compactions & flushes 2024-12-12T22:35:06,026 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 
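The disable traced here runs as a procedure chain on the master (DisableTableProcedure pid=22, CloseTableRegionsProcedure pid=23, TransitRegionStateProcedure pid=24, CloseRegionProcedure pid=25), ending with the region server closing the region. From the client side it is a single Admin call; a minimal sketch under the same assumptions as the example above (illustrative class name, default client configuration):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Schedules the DisableTableProcedure on the master and waits until the
        // table is disabled, covering the region-close steps logged above.
        admin.disableTable(table);
      }
    }
  }
}
```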
2024-12-12T22:35:06,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/24de865a68f341469e27e96621ce6488, entries=150, sequenceid=422, filesize=12.0 K 2024-12-12T22:35:06,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=20.13 KB/20610 for 3292c08f1e2fe18d3fcbb52f186614f5 in 352ms, sequenceid=422, compaction requested=false 2024-12-12T22:35:06,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:06,083 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:06,083 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:06,083 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. after waiting 0 ms 2024-12-12T22:35:06,084 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:06,084 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(2837): Flushing 3292c08f1e2fe18d3fcbb52f186614f5 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-12T22:35:06,087 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=A 2024-12-12T22:35:06,088 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:06,088 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=B 2024-12-12T22:35:06,088 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:06,088 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3292c08f1e2fe18d3fcbb52f186614f5, store=C 2024-12-12T22:35:06,088 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:06,119 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/966cffc0618f4c18a583681ce84f2de1 is 50, key is test_row_0/A:col10/1734042905747/Put/seqid=0 2024-12-12T22:35:06,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T22:35:06,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741926_1102 (size=9857) 2024-12-12T22:35:06,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T22:35:06,563 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/966cffc0618f4c18a583681ce84f2de1 2024-12-12T22:35:06,574 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ef634441925b46f79410b806216d7f5b is 50, key is test_row_0/B:col10/1734042905747/Put/seqid=0 2024-12-12T22:35:06,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741927_1103 (size=9857) 2024-12-12T22:35:06,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T22:35:06,979 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ef634441925b46f79410b806216d7f5b 2024-12-12T22:35:06,989 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/2c91fcebddf54350a6e4401e26104005 is 50, key is test_row_0/C:col10/1734042905747/Put/seqid=0 2024-12-12T22:35:07,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741928_1104 (size=9857) 2024-12-12T22:35:07,404 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=428 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/2c91fcebddf54350a6e4401e26104005 2024-12-12T22:35:07,411 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/A/966cffc0618f4c18a583681ce84f2de1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/966cffc0618f4c18a583681ce84f2de1 2024-12-12T22:35:07,423 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/966cffc0618f4c18a583681ce84f2de1, entries=100, sequenceid=428, filesize=9.6 K 2024-12-12T22:35:07,424 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/B/ef634441925b46f79410b806216d7f5b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ef634441925b46f79410b806216d7f5b 2024-12-12T22:35:07,444 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ef634441925b46f79410b806216d7f5b, entries=100, sequenceid=428, filesize=9.6 K 2024-12-12T22:35:07,447 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/.tmp/C/2c91fcebddf54350a6e4401e26104005 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/2c91fcebddf54350a6e4401e26104005 2024-12-12T22:35:07,462 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/2c91fcebddf54350a6e4401e26104005, entries=100, sequenceid=428, filesize=9.6 K 2024-12-12T22:35:07,467 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 3292c08f1e2fe18d3fcbb52f186614f5 in 1382ms, sequenceid=428, compaction requested=true 2024-12-12T22:35:07,474 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6956b55657d946339a366cda68de997e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9138ca1519c54e5783fa5cff136a7dc8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/dba4dacb0c5145a68e8ea49a52ea30dd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0dd19ec2deab493b964a35db46fb3d15, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/17f10d9c36b141b487be611e4b6b1426, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0262c1875d3648acb92dc3160d498af2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff] to archive 2024-12-12T22:35:07,479 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:35:07,496 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf 2024-12-12T22:35:07,496 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6956b55657d946339a366cda68de997e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6956b55657d946339a366cda68de997e 2024-12-12T22:35:07,496 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/43ffbb4e5e774c17bb5ed00a24ed9740 2024-12-12T22:35:07,498 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/51475b4528894ac4aeaa79d4b6bf0825 2024-12-12T22:35:07,500 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ca38a87ed5934bdb9101822d09a0366e 2024-12-12T22:35:07,502 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/764749e6a9a84047b02697002d30a2e9 2024-12-12T22:35:07,502 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9b6884077a8049568a262c568ff60240 2024-12-12T22:35:07,502 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/28306e0f5899446981adb14fd837a0b6 2024-12-12T22:35:07,508 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9138ca1519c54e5783fa5cff136a7dc8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/9138ca1519c54e5783fa5cff136a7dc8 2024-12-12T22:35:07,513 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/c36e356386ba4dab819ba6579fe30312 2024-12-12T22:35:07,513 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/532d6dde08ed4fe38dd8841fe5c3d566 2024-12-12T22:35:07,513 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/a4dfab5a20f24970bf3ef63c9b6a03da 2024-12-12T22:35:07,513 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/fdd23cbfa7de41e69f3becae5985247d 2024-12-12T22:35:07,517 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/dba4dacb0c5145a68e8ea49a52ea30dd to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/dba4dacb0c5145a68e8ea49a52ea30dd 2024-12-12T22:35:07,519 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0dd19ec2deab493b964a35db46fb3d15 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0dd19ec2deab493b964a35db46fb3d15 2024-12-12T22:35:07,519 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/318525f07e1e4a2d8929ac4a3c6c1533 2024-12-12T22:35:07,520 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/96e250d25e3f4d8aa0f5d96a5b15b8d5 2024-12-12T22:35:07,521 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/639e7da10c394a068b0e840602ec1d78 2024-12-12T22:35:07,521 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0da202561851456bbedc784efe6f7404 2024-12-12T22:35:07,525 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0262c1875d3648acb92dc3160d498af2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0262c1875d3648acb92dc3160d498af2 2024-12-12T22:35:07,525 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/aa06c34e773c455b989d1fa713017778 2024-12-12T22:35:07,526 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6b60850da8734fa8b5f0487dc5db8666 2024-12-12T22:35:07,526 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/ff34c9e6e2b146cb9cb45f2a88a95aa6 2024-12-12T22:35:07,526 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/17f10d9c36b141b487be611e4b6b1426 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/17f10d9c36b141b487be611e4b6b1426 2024-12-12T22:35:07,526 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/613aec3420af4c86a913eb0a3fcade7a 2024-12-12T22:35:07,526 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/f1bf88b097ee4e39a57218cfcdb69dbe 2024-12-12T22:35:07,528 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/b360f0e4071d4e049b798f10cfd736ff 2024-12-12T22:35:07,545 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/542a328dddf843c296998a52cebed323, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/da3221900558410582680aefbb694ab0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/dbf35eb5e4064abeb53467ffcf4af768, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4afba25a712140bda079f83ff7af94c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/cfd1d6128fba4af09fee7847e13d18c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/5293710479ab4bd091134549807f1a81, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0a7618afa3f94abf979628785ab81e30, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b] to archive 2024-12-12T22:35:07,546 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
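When the store closes, its compacted files are moved under the cluster's archive directory rather than deleted, preserving the same data/... layout beneath archive/. A rough sketch of that path mapping as it appears in the entries above, using only Hadoop's Path class; this illustrates the layout visible in the log and is not HBase's HFileArchiver implementation:

```java
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Maps <root>/data/... to <root>/archive/data/..., mirroring the moves logged above.
  static Path toArchive(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc");
    Path hfile = new Path(root,
        "data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/0bfea1f44be5459987b24c0bcd862bcf");
    // Prints the archive/data/default/TestAcidGuarantees/... destination seen in the log.
    System.out.println(toArchive(root, hfile));
  }
}
```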
2024-12-12T22:35:07,554 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/513462e1f8c4415c9c529cba4a908219 2024-12-12T22:35:07,555 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/413ef4c641124a92ad7b93e237aff552 2024-12-12T22:35:07,556 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e3fc55c2728d40b9a99fde0a907dd197 2024-12-12T22:35:07,556 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/da3221900558410582680aefbb694ab0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/da3221900558410582680aefbb694ab0 2024-12-12T22:35:07,556 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/91d6fe970f2249cdb0ff90ad424730ef 2024-12-12T22:35:07,556 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/47e14be3a45249faba26494eb89a18c2 2024-12-12T22:35:07,557 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/542a328dddf843c296998a52cebed323 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/542a328dddf843c296998a52cebed323 2024-12-12T22:35:07,560 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e58508bec12b41d69930d1d7b986fa53 2024-12-12T22:35:07,565 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/dbf35eb5e4064abeb53467ffcf4af768 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/dbf35eb5e4064abeb53467ffcf4af768 2024-12-12T22:35:07,565 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/6c3951aa3653498588c85b2dd4825fa6 2024-12-12T22:35:07,566 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/c17f4b56c1e94644af659a8c3e3f789c 2024-12-12T22:35:07,569 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/3927a596aacb47329c8d95be388bac13 2024-12-12T22:35:07,570 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4afba25a712140bda079f83ff7af94c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4afba25a712140bda079f83ff7af94c0 2024-12-12T22:35:07,570 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/75007aa659b440a7b520c38a10e879bf 2024-12-12T22:35:07,570 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/4c6cf8204974471b9de9140d65bc1363 2024-12-12T22:35:07,571 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/d661468c1aca45fc968badfac293d136 2024-12-12T22:35:07,573 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/cfd1d6128fba4af09fee7847e13d18c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/cfd1d6128fba4af09fee7847e13d18c0 2024-12-12T22:35:07,573 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/42f9a6fe73c04ce5bcb88bcc079c4eb3 2024-12-12T22:35:07,573 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/8429f78553a042e7bdea57f1aef9acc6 2024-12-12T22:35:07,575 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/74458a0cedfa4617a58594de8d9876d7 2024-12-12T22:35:07,577 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/eec6d1f9a2e84d5697c888821ba61009 2024-12-12T22:35:07,580 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ca66a597705843e5bb8db2ccff25195b 2024-12-12T22:35:07,581 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0f0e3400652f493aa7a50220e0fb81cc 2024-12-12T22:35:07,581 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0a7618afa3f94abf979628785ab81e30 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0a7618afa3f94abf979628785ab81e30 2024-12-12T22:35:07,583 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/0e19193f8ebd43d0abd328053d0c1e2b 2024-12-12T22:35:07,586 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/5293710479ab4bd091134549807f1a81 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/5293710479ab4bd091134549807f1a81 2024-12-12T22:35:07,587 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/e7c939b3a577423d910f1555e00f52f2 2024-12-12T22:35:07,590 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fb9a1aa4213145b0aa67de4b0d66af6e, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/a7e19e2c254546aca5fd2a53448e4310, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fdc46cebf5a24626a36a7173865baa65, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ece9f1d40bfe42eb95e709be191e28a3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/9c91a98b837f47298e085aab3d5680ef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/f94a23ff520c43158e5c5a8892eaf75d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/5427824d54724e59b8b0894cae84d742, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb] to archive 2024-12-12T22:35:07,592 DEBUG [StoreCloser-TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:35:07,596 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/10812344abce4fe69064d55e0e886d59 2024-12-12T22:35:07,596 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fb9a1aa4213145b0aa67de4b0d66af6e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fb9a1aa4213145b0aa67de4b0d66af6e 2024-12-12T22:35:07,597 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c 2024-12-12T22:35:07,602 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e19747da840e4e198d571133e35d48c6 2024-12-12T22:35:07,602 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3299ee5b05ba4788a9156889094ff8f9 
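The HFileArchiver entries above and below follow a fixed layout: a compacted store file under data/<namespace>/<table>/<region>/<family>/ is moved to the same relative path under archive/. The short Java sketch below only illustrates that path convention, using a source path copied verbatim from the log; the actual move is performed internally by HBase's HFileArchiver, not by client code like this.

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    public static void main(String[] args) {
        // A store file path taken verbatim from one of the HFileArchiver entries above.
        Path storeFile = new Path("hdfs://localhost:41151/user/jenkins/test-data/"
            + "60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/"
            + "3292c08f1e2fe18d3fcbb52f186614f5/C/fe778f880dd64b9eb6ce6f9663c9524c");
        // data/<namespace>/<table>/<region>/<family>/<hfile>
        //   -> archive/data/<namespace>/<table>/<region>/<family>/<hfile>
        Path archived = new Path(storeFile.toString()
            .replace("/data/default/", "/archive/data/default/"));
        System.out.println(storeFile + " -> " + archived);
    }
}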
2024-12-12T22:35:07,602 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/a7e19e2c254546aca5fd2a53448e4310 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/a7e19e2c254546aca5fd2a53448e4310 2024-12-12T22:35:07,603 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ac798b8565234e44b7e80af09015d1b2 2024-12-12T22:35:07,604 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/09d1cf323c5e4d799dfb83bdd1452474 2024-12-12T22:35:07,606 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7623c66d0a9c49b6aae1b07a949c5947 2024-12-12T22:35:07,606 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/3cfcac590db74b74bf6e7445db516cc6 2024-12-12T22:35:07,606 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fdc46cebf5a24626a36a7173865baa65 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/fdc46cebf5a24626a36a7173865baa65 2024-12-12T22:35:07,607 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/83b9578de899427985d3e3e6f5a29738 2024-12-12T22:35:07,610 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ece9f1d40bfe42eb95e709be191e28a3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/ece9f1d40bfe42eb95e709be191e28a3 2024-12-12T22:35:07,610 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/66f4d25570094a67aca51c77d9083777 2024-12-12T22:35:07,612 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/9c91a98b837f47298e085aab3d5680ef to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/9c91a98b837f47298e085aab3d5680ef 2024-12-12T22:35:07,612 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/719c44abf2224b10838c7b0c780d5296 2024-12-12T22:35:07,612 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0e0a90e658544e6396f98febd7671a07 2024-12-12T22:35:07,613 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/e037b350c44c4587ac79557ecc22cdcb 2024-12-12T22:35:07,612 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/0dcfe8f206d84b5483bbf69e5f012ce0 2024-12-12T22:35:07,614 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/f94a23ff520c43158e5c5a8892eaf75d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/f94a23ff520c43158e5c5a8892eaf75d 2024-12-12T22:35:07,619 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/5427824d54724e59b8b0894cae84d742 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/5427824d54724e59b8b0894cae84d742 2024-12-12T22:35:07,619 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/35d062d333b840278a9cf32eb6aac34a 2024-12-12T22:35:07,620 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/7d120577695e420abb459b2b6cff0256 2024-12-12T22:35:07,620 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/d48d9be9049344258ac19c6efc462e20 2024-12-12T22:35:07,620 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/729af903505544c5b46401c3819297b1 2024-12-12T22:35:07,622 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/61d6612002e8435682c29045d9416b57 2024-12-12T22:35:07,624 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/bcc824e5d3c942b6ae4db85a6679e0fb 2024-12-12T22:35:07,637 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/recovered.edits/431.seqid, newMaxSeqId=431, maxSeqId=1 2024-12-12T22:35:07,643 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5. 2024-12-12T22:35:07,643 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] regionserver.HRegion(1635): Region close journal for 3292c08f1e2fe18d3fcbb52f186614f5: 2024-12-12T22:35:07,647 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=25}] handler.UnassignRegionHandler(170): Closed 3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:07,655 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=24 updating hbase:meta row=3292c08f1e2fe18d3fcbb52f186614f5, regionState=CLOSED 2024-12-12T22:35:07,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-12T22:35:07,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; CloseRegionProcedure 3292c08f1e2fe18d3fcbb52f186614f5, server=1aef280cf0a8,36025,1734042873576 in 1.8090 sec 2024-12-12T22:35:07,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-12T22:35:07,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3292c08f1e2fe18d3fcbb52f186614f5, UNASSIGN in 1.8200 sec 2024-12-12T22:35:07,678 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-12T22:35:07,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8340 sec 2024-12-12T22:35:07,682 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042907682"}]},"ts":"1734042907682"} 2024-12-12T22:35:07,693 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:35:07,713 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T22:35:07,716 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9120 sec 2024-12-12T22:35:07,731 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-12T22:35:07,736 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44098, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-12T22:35:07,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-12T22:35:07,948 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-12T22:35:07,961 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:35:07,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:07,995 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=26, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:07,999 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=26, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:08,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T22:35:08,011 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:08,048 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/recovered.edits] 2024-12-12T22:35:08,075 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/02de7685b75847cc9bea238cba2c2891 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/02de7685b75847cc9bea238cba2c2891 2024-12-12T22:35:08,075 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/966cffc0618f4c18a583681ce84f2de1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/966cffc0618f4c18a583681ce84f2de1 2024-12-12T22:35:08,076 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6bc0b2e269764acf98c27080d560ce63 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/A/6bc0b2e269764acf98c27080d560ce63 2024-12-12T22:35:08,108 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/af37dcaa3a9e495d868944d66dae0006 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/af37dcaa3a9e495d868944d66dae0006 2024-12-12T22:35:08,108 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ef634441925b46f79410b806216d7f5b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/ef634441925b46f79410b806216d7f5b 2024-12-12T22:35:08,108 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/21a25ca008934dd9808b0710ada1453c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/B/21a25ca008934dd9808b0710ada1453c 2024-12-12T22:35:08,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T22:35:08,120 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/24de865a68f341469e27e96621ce6488 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/24de865a68f341469e27e96621ce6488 2024-12-12T22:35:08,120 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/4f4e6052c6054a41aa01bb45b5401245 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/4f4e6052c6054a41aa01bb45b5401245 2024-12-12T22:35:08,120 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/2c91fcebddf54350a6e4401e26104005 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/C/2c91fcebddf54350a6e4401e26104005 2024-12-12T22:35:08,128 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/recovered.edits/431.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5/recovered.edits/431.seqid 2024-12-12T22:35:08,129 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3292c08f1e2fe18d3fcbb52f186614f5 2024-12-12T22:35:08,129 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:35:08,134 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=26, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:08,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-12T22:35:08,158 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:35:08,235 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T22:35:08,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=26, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:08,256 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T22:35:08,257 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734042908256"}]},"ts":"9223372036854775807"} 2024-12-12T22:35:08,262 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:35:08,262 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3292c08f1e2fe18d3fcbb52f186614f5, NAME => 'TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:35:08,262 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
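Around this point the log records the client-driven table lifecycle: DisableTableProcedure pid=22 completed a moment earlier, and DeleteTableProcedure pid=26 is now archiving the region directories and cleaning hbase:meta. A minimal sketch of the client-side Admin calls that trigger these two procedures follows; the configuration and connection setup are assumptions, and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.disableTable(table); // submits a DisableTableProcedure (pid=22 in this log)
            admin.deleteTable(table);  // submits a DeleteTableProcedure (pid=26 in this log)
        }
    }
}

Both Admin methods are synchronous and return only after the master-side procedure finishes, which is why the test thread logs "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" once the corresponding procId is done, after repeated "Checking to see if procedure is done" polls on the master.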
2024-12-12T22:35:08,263 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734042908262"}]},"ts":"9223372036854775807"} 2024-12-12T22:35:08,269 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:35:08,288 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=26, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:08,296 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 326 msec 2024-12-12T22:35:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-12T22:35:08,317 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-12T22:35:08,335 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=244 (was 218) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4fe961e3-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1098446419_22 at /127.0.0.1:56246 [Waiting for operation #25] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4fe961e3-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;1aef280cf0a8:36025-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4fe961e3-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4fe961e3-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1350 (was 899) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3541 (was 6826) 2024-12-12T22:35:08,350 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=1350, ProcessCount=11, AvailableMemoryMB=3540 2024-12-12T22:35:08,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T22:35:08,353 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:35:08,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=27, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:08,368 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:35:08,368 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:08,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 27 2024-12-12T22:35:08,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=27 2024-12-12T22:35:08,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:35:08,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741929_1105 (size=963) 2024-12-12T22:35:08,395 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:35:08,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741930_1106 (size=53) 2024-12-12T22:35:08,408 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:35:08,408 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8c758bf11da38bc1c9062a0f3e05f513, disabling compactions & flushes 2024-12-12T22:35:08,408 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,408 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,409 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. after waiting 0 ms 2024-12-12T22:35:08,409 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,409 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,409 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:08,410 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:35:08,410 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734042908410"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042908410"}]},"ts":"1734042908410"} 2024-12-12T22:35:08,412 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-12T22:35:08,413 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:35:08,413 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042908413"}]},"ts":"1734042908413"} 2024-12-12T22:35:08,414 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:35:08,429 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, ASSIGN}] 2024-12-12T22:35:08,431 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, ASSIGN 2024-12-12T22:35:08,431 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:35:08,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=27 2024-12-12T22:35:08,582 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:08,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; OpenRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:08,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=27 2024-12-12T22:35:08,736 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:08,746 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:08,746 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7285): Opening region: {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:35:08,746 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,746 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:35:08,747 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7327): checking encryption for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,747 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7330): checking classloading for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,748 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,750 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:08,750 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName A 2024-12-12T22:35:08,750 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:08,751 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:08,751 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,753 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:08,754 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName B 2024-12-12T22:35:08,754 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:08,755 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:08,755 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,758 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:08,758 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName C 2024-12-12T22:35:08,758 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:08,759 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:08,759 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,763 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,764 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,766 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:35:08,779 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1085): writing seq id for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:08,787 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:35:08,788 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1102): Opened 8c758bf11da38bc1c9062a0f3e05f513; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59715826, jitterRate=-0.11016485095024109}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:35:08,789 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1001): Region open journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:08,791 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., pid=29, masterSystemTime=1734042908736 2024-12-12T22:35:08,795 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:08,795 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:08,796 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:08,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-12T22:35:08,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; OpenRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 in 214 msec 2024-12-12T22:35:08,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=27 2024-12-12T22:35:08,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=27, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, ASSIGN in 375 msec 2024-12-12T22:35:08,808 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:35:08,809 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042908808"}]},"ts":"1734042908808"} 2024-12-12T22:35:08,810 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:35:08,822 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=27, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:35:08,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 469 msec 2024-12-12T22:35:08,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=27 2024-12-12T22:35:08,973 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 27 completed 2024-12-12T22:35:08,976 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a378df6 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cca453a 2024-12-12T22:35:08,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@350b322d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:08,990 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:08,996 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59136, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:08,998 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:35:09,000 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44100, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:35:09,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T22:35:09,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:35:09,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:09,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741931_1107 (size=999) 2024-12-12T22:35:09,435 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T22:35:09,435 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T22:35:09,440 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:35:09,450 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, REOPEN/MOVE}] 2024-12-12T22:35:09,451 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, REOPEN/MOVE 2024-12-12T22:35:09,452 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:09,453 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:35:09,453 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:09,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:09,607 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,607 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:35:09,607 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 8c758bf11da38bc1c9062a0f3e05f513, disabling compactions & flushes 2024-12-12T22:35:09,607 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:09,608 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:09,608 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. after waiting 0 ms 2024-12-12T22:35:09,608 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:09,612 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T22:35:09,613 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:09,613 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:09,613 WARN [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionServer(3786): Not adding moved region record: 8c758bf11da38bc1c9062a0f3e05f513 to self. 2024-12-12T22:35:09,614 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,615 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=CLOSED 2024-12-12T22:35:09,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-12T22:35:09,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 in 163 msec 2024-12-12T22:35:09,618 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, REOPEN/MOVE; state=CLOSED, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=true 2024-12-12T22:35:09,770 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:09,775 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:09,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:09,948 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:09,948 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:35:09,949 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,949 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:35:09,949 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,949 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,961 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,968 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:09,975 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName A 2024-12-12T22:35:09,983 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:09,986 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:09,987 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,990 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:09,991 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName B 2024-12-12T22:35:09,991 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:09,996 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:09,996 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:09,999 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:09,999 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c758bf11da38bc1c9062a0f3e05f513 columnFamilyName C 2024-12-12T22:35:09,999 DEBUG [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:10,001 INFO [StoreOpener-8c758bf11da38bc1c9062a0f3e05f513-1 {}] regionserver.HStore(327): Store=8c758bf11da38bc1c9062a0f3e05f513/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:10,001 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:10,004 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:10,013 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:10,017 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:35:10,037 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:10,044 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened 8c758bf11da38bc1c9062a0f3e05f513; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74547947, jitterRate=0.1108509749174118}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:35:10,047 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:10,055 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., pid=34, masterSystemTime=1734042909930 2024-12-12T22:35:10,063 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:10,063 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:10,067 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=OPEN, openSeqNum=5, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:10,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-12T22:35:10,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 in 300 msec 2024-12-12T22:35:10,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-12T22:35:10,082 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, REOPEN/MOVE in 628 msec 2024-12-12T22:35:10,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-12T22:35:10,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 644 msec 2024-12-12T22:35:10,094 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 1.0780 sec 2024-12-12T22:35:10,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-12T22:35:10,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4c7d6279 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@433e2b26 2024-12-12T22:35:10,214 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b4bd1ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,216 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b55744e to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@176c5c1b 2024-12-12T22:35:10,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ebda6ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,250 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a9306be to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24f64590 2024-12-12T22:35:10,462 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19a533a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,465 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x769942d9 to 
127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c5c4716 2024-12-12T22:35:10,779 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367f47f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,780 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x00cb464a to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68f0be85 2024-12-12T22:35:10,827 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247c0c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,829 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x152377d4 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@517ff977 2024-12-12T22:35:10,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b727d6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,858 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-12-12T22:35:10,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,917 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-12-12T22:35:10,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:10,953 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7af61386 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a7e1dd 2024-12-12T22:35:10,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630684bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:11,015 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:11,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=35, table=TestAcidGuarantees 2024-12-12T22:35:11,022 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=35, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=35, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:11,025 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=35, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=35, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:11,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:11,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:11,042 DEBUG [hconnection-0x7ff3aacc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,044 DEBUG [hconnection-0x1bf01718-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,046 DEBUG [hconnection-0x14d69165-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,046 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41210, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,048 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,052 DEBUG [hconnection-0x7b62f6ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,054 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,056 DEBUG [hconnection-0x3e57955e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,057 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,057 DEBUG [hconnection-0x668cb76e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,059 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,059 DEBUG [hconnection-0x6b038333-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-12T22:35:11,060 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41268, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,060 DEBUG [hconnection-0x34c2a382-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,063 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,064 DEBUG [hconnection-0x21b61ef9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:11,072 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41290, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,076 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41296, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:11,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:11,083 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:11,091 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:11,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:11,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:11,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:11,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042971121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,130 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042971125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042971128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042971129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042971129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:11,183 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:11,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,188 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042971233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042971235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042971234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042971235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042971236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121ce0a3cb709642d2844f6509fc6334ae_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042911080/Put/seqid=0 2024-12-12T22:35:11,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741932_1108 (size=12154) 2024-12-12T22:35:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:11,351 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:11,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042971442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042971441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042971445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042971443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042971447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:11,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:11,668 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:11,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,712 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:11,727 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412121ce0a3cb709642d2844f6509fc6334ae_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121ce0a3cb709642d2844f6509fc6334ae_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:11,732 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/3d65b371282c4246b44467c268f675c4, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:11,751 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/3d65b371282c4246b44467c268f675c4 is 175, key is test_row_0/A:col10/1734042911080/Put/seqid=0 2024-12-12T22:35:11,752 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042971748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042971752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042971763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,781 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042971764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:11,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042971767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741933_1109 (size=30955) 2024-12-12T22:35:11,822 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,826 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=19, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/3d65b371282c4246b44467c268f675c4 2024-12-12T22:35:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:11,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,827 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,915 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2695466bb6bf4259b223fe62b9e273ad is 50, key is test_row_0/B:col10/1734042911080/Put/seqid=0 2024-12-12T22:35:11,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741934_1110 (size=12001) 2024-12-12T22:35:11,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:11,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:11,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:11,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:11,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:11,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2695466bb6bf4259b223fe62b9e273ad 2024-12-12T22:35:12,108 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2cfa68c959bb46f6b5382363b3a99446 is 50, key is test_row_0/C:col10/1734042911080/Put/seqid=0 2024-12-12T22:35:12,143 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:12,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:12,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741935_1111 (size=12001) 2024-12-12T22:35:12,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=19 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2cfa68c959bb46f6b5382363b3a99446 2024-12-12T22:35:12,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/3d65b371282c4246b44467c268f675c4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4 2024-12-12T22:35:12,234 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4, entries=150, sequenceid=19, filesize=30.2 K 2024-12-12T22:35:12,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2695466bb6bf4259b223fe62b9e273ad as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad 2024-12-12T22:35:12,260 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad, entries=150, sequenceid=19, filesize=11.7 K 2024-12-12T22:35:12,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2cfa68c959bb46f6b5382363b3a99446 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446 2024-12-12T22:35:12,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446, entries=150, sequenceid=19, filesize=11.7 K 2024-12-12T22:35:12,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 
8c758bf11da38bc1c9062a0f3e05f513 in 1197ms, sequenceid=19, compaction requested=false 2024-12-12T22:35:12,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:12,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:12,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:35:12,301 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:12,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:12,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:12,352 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:12,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121258da9a33bfca40eaa9cb3ea8db0b5da1_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042911124/Put/seqid=0 2024-12-12T22:35:12,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042972401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042972399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042972404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042972415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042972421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741936_1112 (size=12154) 2024-12-12T22:35:12,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,470 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:12,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,483 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121258da9a33bfca40eaa9cb3ea8db0b5da1_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258da9a33bfca40eaa9cb3ea8db0b5da1_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:12,497 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/1e073f3f2d434381a955b15d42eae04a, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:12,498 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/1e073f3f2d434381a955b15d42eae04a is 175, key is test_row_0/A:col10/1734042911124/Put/seqid=0 2024-12-12T22:35:12,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042972533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042972536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741937_1113 (size=30955) 2024-12-12T22:35:12,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042972537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042972546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042972550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,556 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/1e073f3f2d434381a955b15d42eae04a 2024-12-12T22:35:12,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/39fb24d1b59d4f37a2733a4ad53719ed is 50, key is test_row_0/B:col10/1734042911124/Put/seqid=0 2024-12-12T22:35:12,641 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:12,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741938_1114 (size=12001) 2024-12-12T22:35:12,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/39fb24d1b59d4f37a2733a4ad53719ed 2024-12-12T22:35:12,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2eacf46601a447f2b145df5dbd788768 is 50, key is test_row_0/C:col10/1734042911124/Put/seqid=0 2024-12-12T22:35:12,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042972760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042972760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042972761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042972760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:12,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042972761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,807 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:12,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:12,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] handler.RSProcedureHandler(58): pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=36 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=36 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741939_1115 (size=12001) 2024-12-12T22:35:12,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2eacf46601a447f2b145df5dbd788768 2024-12-12T22:35:12,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/1e073f3f2d434381a955b15d42eae04a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a 2024-12-12T22:35:12,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a, entries=150, sequenceid=43, filesize=30.2 K 2024-12-12T22:35:12,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/39fb24d1b59d4f37a2733a4ad53719ed as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed 2024-12-12T22:35:12,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed, entries=150, sequenceid=43, filesize=11.7 K 2024-12-12T22:35:12,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2eacf46601a447f2b145df5dbd788768 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768 2024-12-12T22:35:12,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768, entries=150, sequenceid=43, filesize=11.7 K 2024-12-12T22:35:12,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8c758bf11da38bc1c9062a0f3e05f513 in 592ms, sequenceid=43, compaction requested=false 2024-12-12T22:35:12,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:12,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:12,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=36 2024-12-12T22:35:12,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:12,971 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:12,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:13,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127d8870419f4d4617bcd6249afe3214ac_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042912401/Put/seqid=0 2024-12-12T22:35:13,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:13,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741940_1116 (size=12154) 2024-12-12T22:35:13,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:13,119 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127d8870419f4d4617bcd6249afe3214ac_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d8870419f4d4617bcd6249afe3214ac_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:13,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/532f50d51a12459abe878667a5144a7c, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:13,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/532f50d51a12459abe878667a5144a7c is 175, key is test_row_0/A:col10/1734042912401/Put/seqid=0 2024-12-12T22:35:13,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741941_1117 (size=30955) 2024-12-12T22:35:13,187 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/532f50d51a12459abe878667a5144a7c 2024-12-12T22:35:13,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042973203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042973205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,237 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:35:13,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042973211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042973213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042973216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/84027398ce6146af9ebdd6ceb4efea47 is 50, key is test_row_0/B:col10/1734042912401/Put/seqid=0 2024-12-12T22:35:13,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042973358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042973359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042973360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042973359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042973360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741942_1118 (size=12001) 2024-12-12T22:35:13,574 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042973568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042973568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,576 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042973572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042973583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042973592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,795 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/84027398ce6146af9ebdd6ceb4efea47 2024-12-12T22:35:13,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/77af206244e349e1ab002c1a1e9022d1 is 50, key is test_row_0/C:col10/1734042912401/Put/seqid=0 2024-12-12T22:35:13,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042973885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042973886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042973888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042973890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741943_1119 (size=12001) 2024-12-12T22:35:13,900 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/77af206244e349e1ab002c1a1e9022d1 2024-12-12T22:35:13,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042973900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:13,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/532f50d51a12459abe878667a5144a7c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c 2024-12-12T22:35:13,927 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c, entries=150, sequenceid=55, filesize=30.2 K 2024-12-12T22:35:13,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/84027398ce6146af9ebdd6ceb4efea47 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47 2024-12-12T22:35:13,954 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T22:35:13,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/77af206244e349e1ab002c1a1e9022d1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1 2024-12-12T22:35:13,968 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1, entries=150, sequenceid=55, filesize=11.7 K 2024-12-12T22:35:13,969 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8c758bf11da38bc1c9062a0f3e05f513 in 998ms, sequenceid=55, compaction requested=true 2024-12-12T22:35:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:13,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-12T22:35:13,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-12T22:35:13,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-12T22:35:13,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9540 sec 2024-12-12T22:35:14,003 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=35, table=TestAcidGuarantees in 2.9800 sec 2024-12-12T22:35:14,401 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:14,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:14,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:14,454 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a5785803d0e147189bfcb0eded2e9b2f_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042913090/Put/seqid=0 
2024-12-12T22:35:14,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042974444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042974446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042974453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042974456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042974458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741944_1120 (size=12154) 2024-12-12T22:35:14,494 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:14,510 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212a5785803d0e147189bfcb0eded2e9b2f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5785803d0e147189bfcb0eded2e9b2f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:14,518 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4fef373f767c4a9890e7c322af369fae, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:14,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4fef373f767c4a9890e7c322af369fae is 175, key is test_row_0/A:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:14,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042974564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042974564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042974567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042974567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042974571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741945_1121 (size=30955) 2024-12-12T22:35:14,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042974780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,786 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042974780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,787 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,788 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042974781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:14,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042974782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042974780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:14,984 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=80, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4fef373f767c4a9890e7c322af369fae 2024-12-12T22:35:15,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2876b56e97514cb28f1eec9e97282b98 is 50, key is test_row_0/B:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:15,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741946_1122 (size=12001) 2024-12-12T22:35:15,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042975092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042975092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042975092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042975096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042975099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-12T22:35:15,161 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-12-12T22:35:15,195 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:15,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=37, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees 2024-12-12T22:35:15,201 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=37, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:15,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T22:35:15,202 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=37, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:15,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:15,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=37 2024-12-12T22:35:15,355 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-12T22:35:15,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:15,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] handler.RSProcedureHandler(58): pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:15,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:15,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=38 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:15,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2876b56e97514cb28f1eec9e97282b98 2024-12-12T22:35:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T22:35:15,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d35dd6539b754d53baa395ad8ff75e65 is 50, key is test_row_0/C:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:15,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-12T22:35:15,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:15,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,529 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] handler.RSProcedureHandler(58): pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:15,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:15,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=38 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:15,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741947_1123 (size=12001) 2024-12-12T22:35:15,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=80 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d35dd6539b754d53baa395ad8ff75e65 2024-12-12T22:35:15,590 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4fef373f767c4a9890e7c322af369fae as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae 2024-12-12T22:35:15,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042975605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042975611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042975612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042975612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:15,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042975615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,625 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae, entries=150, sequenceid=80, filesize=30.2 K 2024-12-12T22:35:15,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2876b56e97514cb28f1eec9e97282b98 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98 2024-12-12T22:35:15,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98, entries=150, sequenceid=80, filesize=11.7 K 2024-12-12T22:35:15,673 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d35dd6539b754d53baa395ad8ff75e65 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65 2024-12-12T22:35:15,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,700 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-12T22:35:15,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:15,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,700 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] handler.RSProcedureHandler(58): pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:15,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=38 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:15,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=38 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:15,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65, entries=150, sequenceid=80, filesize=11.7 K 2024-12-12T22:35:15,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8c758bf11da38bc1c9062a0f3e05f513 in 1312ms, sequenceid=80, compaction requested=true 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:15,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:35:15,714 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:15,714 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:15,731 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 123820 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:15,731 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:15,732 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:15,732 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=120.9 K 2024-12-12T22:35:15,732 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:15,732 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae] 2024-12-12T22:35:15,733 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:15,733 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:15,733 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:15,733 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=46.9 K 2024-12-12T22:35:15,734 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d65b371282c4246b44467c268f675c4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1734042911076 2024-12-12T22:35:15,735 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cfa68c959bb46f6b5382363b3a99446, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1734042911076 2024-12-12T22:35:15,735 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e073f3f2d434381a955b15d42eae04a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734042911124 2024-12-12T22:35:15,736 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2eacf46601a447f2b145df5dbd788768, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734042911124 2024-12-12T22:35:15,737 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 77af206244e349e1ab002c1a1e9022d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734042912381 2024-12-12T22:35:15,737 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 532f50d51a12459abe878667a5144a7c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734042912381 2024-12-12T22:35:15,737 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d35dd6539b754d53baa395ad8ff75e65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:15,738 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fef373f767c4a9890e7c322af369fae, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:15,792 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:15,794 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#102 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:15,795 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/faf510f22ef0436f8d996c992d87c967 is 50, key is test_row_0/C:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:15,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T22:35:15,816 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212e19ba9a3235c456a81077c0d35cd8695_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:15,824 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212e19ba9a3235c456a81077c0d35cd8695_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:15,825 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e19ba9a3235c456a81077c0d35cd8695_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:15,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:15,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=38 2024-12-12T22:35:15,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:15,858 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:35:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:15,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:15,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:15,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:15,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741948_1124 (size=12139) 2024-12-12T22:35:15,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741949_1125 (size=4469) 2024-12-12T22:35:15,891 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/faf510f22ef0436f8d996c992d87c967 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/faf510f22ef0436f8d996c992d87c967 2024-12-12T22:35:15,895 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#103 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:15,898 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/61fa2c92053644d8b567441b4c45f177 is 175, key is test_row_0/A:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:15,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121211be312bb9204139ba9b7778f73039d1_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042914402/Put/seqid=0 2024-12-12T22:35:15,916 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into faf510f22ef0436f8d996c992d87c967(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:15,916 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:15,916 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=12, startTime=1734042915712; duration=0sec 2024-12-12T22:35:15,916 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:15,916 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:15,916 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:15,920 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:15,920 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:15,921 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:15,921 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=46.9 K 2024-12-12T22:35:15,925 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2695466bb6bf4259b223fe62b9e273ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=19, earliestPutTs=1734042911076 2024-12-12T22:35:15,926 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 39fb24d1b59d4f37a2733a4ad53719ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1734042911124 2024-12-12T22:35:15,928 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 84027398ce6146af9ebdd6ceb4efea47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1734042912381 2024-12-12T22:35:15,931 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2876b56e97514cb28f1eec9e97282b98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:15,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741950_1126 (size=31093) 2024-12-12T22:35:15,979 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#105 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:15,980 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/08b7669939974cc28802c5c22fe10427 is 50, key is test_row_0/B:col10/1734042913090/Put/seqid=0 2024-12-12T22:35:16,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741951_1127 (size=12154) 2024-12-12T22:35:16,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:16,027 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121211be312bb9204139ba9b7778f73039d1_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211be312bb9204139ba9b7778f73039d1_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:16,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/7929e70cc9dd48d28eb1f923696ded97, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:16,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/7929e70cc9dd48d28eb1f923696ded97 is 175, key is test_row_0/A:col10/1734042914402/Put/seqid=0 2024-12-12T22:35:16,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741952_1128 (size=12139) 2024-12-12T22:35:16,071 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/08b7669939974cc28802c5c22fe10427 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/08b7669939974cc28802c5c22fe10427 2024-12-12T22:35:16,090 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into 08b7669939974cc28802c5c22fe10427(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:16,090 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:16,090 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=12, startTime=1734042915712; duration=0sec 2024-12-12T22:35:16,090 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:16,090 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:16,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741953_1129 (size=30955) 2024-12-12T22:35:16,113 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/7929e70cc9dd48d28eb1f923696ded97 2024-12-12T22:35:16,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e1ea9ca576f941c3acfdd83515222ebe is 50, key is test_row_0/B:col10/1734042914402/Put/seqid=0 2024-12-12T22:35:16,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741954_1130 (size=12001) 2024-12-12T22:35:16,187 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e1ea9ca576f941c3acfdd83515222ebe 2024-12-12T22:35:16,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/71f870bcea23401a904d21a30310b16a is 50, key is test_row_0/C:col10/1734042914402/Put/seqid=0 2024-12-12T22:35:16,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741955_1131 (size=12001) 2024-12-12T22:35:16,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T22:35:16,376 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/61fa2c92053644d8b567441b4c45f177 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177 2024-12-12T22:35:16,392 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into 61fa2c92053644d8b567441b4c45f177(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:16,392 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:16,392 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=12, startTime=1734042915712; duration=0sec 2024-12-12T22:35:16,392 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:16,392 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:16,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:16,668 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/71f870bcea23401a904d21a30310b16a 2024-12-12T22:35:16,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/7929e70cc9dd48d28eb1f923696ded97 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97 2024-12-12T22:35:16,700 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97, entries=150, sequenceid=91, filesize=30.2 K 2024-12-12T22:35:16,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e1ea9ca576f941c3acfdd83515222ebe as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe 2024-12-12T22:35:16,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042976706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042976711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042976716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042976716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,729 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T22:35:16,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042976721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/71f870bcea23401a904d21a30310b16a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a 2024-12-12T22:35:16,753 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a, entries=150, sequenceid=91, filesize=11.7 K 2024-12-12T22:35:16,757 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 8c758bf11da38bc1c9062a0f3e05f513 in 899ms, sequenceid=91, compaction requested=false 2024-12-12T22:35:16,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:16,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:16,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=38}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=38 2024-12-12T22:35:16,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=38 2024-12-12T22:35:16,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-12T22:35:16,762 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5580 sec 2024-12-12T22:35:16,765 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=37, table=TestAcidGuarantees in 1.5680 sec 2024-12-12T22:35:16,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:16,836 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:35:16,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:16,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:16,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:16,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:16,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:16,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:16,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042976855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042976857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042976868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042976859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042976872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e692ff3fb6fd483985857060e5d75177_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:16,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741956_1132 (size=12154) 2024-12-12T22:35:16,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042976970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042976971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,991 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042976977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,992 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042976988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:16,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:16,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042976992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042977177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042977180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042977193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042977202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042977208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=37 2024-12-12T22:35:17,309 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 37 completed 2024-12-12T22:35:17,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:17,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-12-12T22:35:17,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:17,326 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:17,327 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:17,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:17,341 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,352 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e692ff3fb6fd483985857060e5d75177_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e692ff3fb6fd483985857060e5d75177_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:17,354 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/2c841c05ecb44fa1a09dc3a7ed6c4d41, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 is 175, key is test_row_0/A:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741957_1133 (size=30955) 2024-12-12T22:35:17,401 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=123, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 2024-12-12T22:35:17,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/3bdee010924745b9b7b7618679dbc053 is 50, key is test_row_0/B:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:17,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741958_1134 (size=12001) 2024-12-12T22:35:17,479 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/3bdee010924745b9b7b7618679dbc053 2024-12-12T22:35:17,484 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T22:35:17,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:17,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,487 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:17,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:17,491 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042977488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:17,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042977491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042977504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,508 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042977506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:17,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042977512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/42a0115ee15548beb659dce2142309a1 is 50, key is test_row_0/C:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741959_1135 (size=12001) 2024-12-12T22:35:17,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/42a0115ee15548beb659dce2142309a1 2024-12-12T22:35:17,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 2024-12-12T22:35:17,603 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41, entries=150, sequenceid=123, filesize=30.2 K 2024-12-12T22:35:17,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/3bdee010924745b9b7b7618679dbc053 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053 
2024-12-12T22:35:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T22:35:17,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/42a0115ee15548beb659dce2142309a1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1 2024-12-12T22:35:17,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1, entries=150, sequenceid=123, filesize=11.7 K 2024-12-12T22:35:17,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:17,642 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 8c758bf11da38bc1c9062a0f3e05f513 in 805ms, sequenceid=123, compaction requested=true 2024-12-12T22:35:17,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:17,642 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:17,647 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:17,648 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small 
Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:17,648 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:35:17,648 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:17,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-12T22:35:17,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,651 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:35:17,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:17,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:17,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:17,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:17,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:17,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:17,654 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:17,654 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:17,654 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:17,654 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/08b7669939974cc28802c5c22fe10427, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=35.3 K 2024-12-12T22:35:17,655 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:17,655 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:17,655 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,655 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=90.8 K 2024-12-12T22:35:17,655 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,655 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41] 2024-12-12T22:35:17,656 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b7669939974cc28802c5c22fe10427, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:17,663 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e1ea9ca576f941c3acfdd83515222ebe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734042914402 2024-12-12T22:35:17,663 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61fa2c92053644d8b567441b4c45f177, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:17,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,667 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7929e70cc9dd48d28eb1f923696ded97, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734042914402 2024-12-12T22:35:17,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,668 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bdee010924745b9b7b7618679dbc053, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714 2024-12-12T22:35:17,668 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c841c05ecb44fa1a09dc3a7ed6c4d41, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714 2024-12-12T22:35:17,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e280c7956c2749928efdbb271889b012_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_1/A:col10/1734042916863/Put/seqid=0 2024-12-12T22:35:17,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,717 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#112 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:17,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,717 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c0ceb99cce1243fe86cb68f1f60e6376 is 50, key is test_row_0/B:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,726 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,742 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,752 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741960_1136 (size=9764) 2024-12-12T22:35:17,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,766 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,776 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e280c7956c2749928efdbb271889b012_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e280c7956c2749928efdbb271889b012_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:17,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/fc9d638c53ad45c2b9dab57ec52bf656, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/fc9d638c53ad45c2b9dab57ec52bf656 is 175, key is test_row_1/A:col10/1734042916863/Put/seqid=0 2024-12-12T22:35:17,791 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412122516a957898a418388596289080192a0_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,796 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412122516a957898a418388596289080192a0_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,796 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122516a957898a418388596289080192a0_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:17,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741961_1137 (size=12241) 2024-12-12T22:35:17,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,848 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c0ceb99cce1243fe86cb68f1f60e6376 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c0ceb99cce1243fe86cb68f1f60e6376 2024-12-12T22:35:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741962_1138 (size=22411) 2024-12-12T22:35:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741963_1139 (size=4469) 2024-12-12T22:35:17,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,868 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#113 average throughput is 0.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,869 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c60b137474e949039afbb03d4ca50254 is 175, key is test_row_0/A:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,872 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into c0ceb99cce1243fe86cb68f1f60e6376(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:17,872 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:17,872 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=13, startTime=1734042917648; duration=0sec 2024-12-12T22:35:17,872 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:17,873 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:17,874 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:17,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,878 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:17,878 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:17,878 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in 
TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:17,878 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/faf510f22ef0436f8d996c992d87c967, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=35.3 K 2024-12-12T22:35:17,879 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting faf510f22ef0436f8d996c992d87c967, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=80, earliestPutTs=1734042913090 2024-12-12T22:35:17,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,880 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 71f870bcea23401a904d21a30310b16a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734042914402 2024-12-12T22:35:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,885 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 42a0115ee15548beb659dce2142309a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714 2024-12-12T22:35:17,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,899 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741964_1140 (size=31195) 2024-12-12T22:35:17,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,915 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#114 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:17,915 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2c6eb27d71c748e0b2b14744676d3edb is 50, key is test_row_0/C:col10/1734042916835/Put/seqid=0 2024-12-12T22:35:17,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,937 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c60b137474e949039afbb03d4ca50254 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254 2024-12-12T22:35:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:17,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,958 
INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into c60b137474e949039afbb03d4ca50254(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:17,958 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:17,958 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=13, startTime=1734042917642; duration=0sec 2024-12-12T22:35:17,959 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:17,959 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,985 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:17,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741965_1141 (size=12241) 2024-12-12T22:35:18,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,014 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2c6eb27d71c748e0b2b14744676d3edb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2c6eb27d71c748e0b2b14744676d3edb 2024-12-12T22:35:18,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,024 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into 2c6eb27d71c748e0b2b14744676d3edb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:18,024 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:18,024 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=13, startTime=1734042917648; duration=0sec 2024-12-12T22:35:18,024 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:18,024 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:18,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:18,039 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:18,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:18,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042978190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042978190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042978200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042978200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042978200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,255 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/fc9d638c53ad45c2b9dab57ec52bf656 2024-12-12T22:35:18,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2560892471a242539a0c78229387b759 is 50, key is test_row_1/B:col10/1734042916863/Put/seqid=0 2024-12-12T22:35:18,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042978308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042978308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042978308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042978308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042978315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741966_1142 (size=9707) 2024-12-12T22:35:18,366 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2560892471a242539a0c78229387b759 2024-12-12T22:35:18,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/b252cba5404d44cfbc4204f6b5ebc4dd is 50, key is test_row_1/C:col10/1734042916863/Put/seqid=0 2024-12-12T22:35:18,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741967_1143 (size=9707) 2024-12-12T22:35:18,448 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/b252cba5404d44cfbc4204f6b5ebc4dd 2024-12-12T22:35:18,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:18,468 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/fc9d638c53ad45c2b9dab57ec52bf656 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656 2024-12-12T22:35:18,477 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656, entries=100, sequenceid=131, filesize=21.9 K 2024-12-12T22:35:18,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2560892471a242539a0c78229387b759 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759 2024-12-12T22:35:18,491 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759, entries=100, sequenceid=131, filesize=9.5 K 2024-12-12T22:35:18,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/b252cba5404d44cfbc4204f6b5ebc4dd as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd 2024-12-12T22:35:18,503 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd, entries=100, sequenceid=131, filesize=9.5 K 2024-12-12T22:35:18,510 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=181.14 KB/185490 for 8c758bf11da38bc1c9062a0f3e05f513 in 859ms, sequenceid=131, compaction requested=false 2024-12-12T22:35:18,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:18,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:18,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-12T22:35:18,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-12T22:35:18,521 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-12T22:35:18,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1870 sec 2024-12-12T22:35:18,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:18,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-12T22:35:18,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042978521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042978529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:18,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042978536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:18,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042978544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 1.2060 sec 2024-12-12T22:35:18,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042978558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124f011fa3524246aaa29243bae60b0b57_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:18,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741968_1144 (size=12304) 2024-12-12T22:35:18,619 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:18,634 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124f011fa3524246aaa29243bae60b0b57_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f011fa3524246aaa29243bae60b0b57_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:18,643 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e50d5ab8c065424f84d64bfd3985bd03, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:18,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e50d5ab8c065424f84d64bfd3985bd03 is 175, key is test_row_0/A:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:18,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042978643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,649 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042978644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042978659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741969_1145 (size=31105) 2024-12-12T22:35:18,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042978693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,696 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=69.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e50d5ab8c065424f84d64bfd3985bd03 2024-12-12T22:35:18,753 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/744c80bf3de443e19c566a89a72f29b2 is 50, key is test_row_0/B:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:18,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741970_1146 (size=12151) 2024-12-12T22:35:18,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/744c80bf3de443e19c566a89a72f29b2 2024-12-12T22:35:18,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042978846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042978855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042978860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/ffd4eeae60fd40609971c6fcae47f75f is 50, key is test_row_0/C:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:18,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042978880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042978901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:18,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741971_1147 (size=12151) 2024-12-12T22:35:18,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/ffd4eeae60fd40609971c6fcae47f75f 2024-12-12T22:35:18,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e50d5ab8c065424f84d64bfd3985bd03 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03 2024-12-12T22:35:19,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03, entries=150, sequenceid=168, filesize=30.4 K 2024-12-12T22:35:19,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,028 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/744c80bf3de443e19c566a89a72f29b2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2 2024-12-12T22:35:19,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2, entries=150, sequenceid=168, filesize=11.9 K
2024-12-12T22:35:19,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/ffd4eeae60fd40609971c6fcae47f75f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f
2024-12-12T22:35:19,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f, entries=150, sequenceid=168, filesize=11.9 K
2024-12-12T22:35:19,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~207.98 KB/212970, heapSize ~545.63 KB/558720, currentSize=0 B/0 for 8c758bf11da38bc1c9062a0f3e05f513 in 540ms, sequenceid=168, compaction requested=true
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513:
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3
2024-12-12T22:35:19,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0
2024-12-12T22:35:19,069 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T22:35:19,069 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T22:35:19,072 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T22:35:19,072 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files)
2024-12-12T22:35:19,072 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.
2024-12-12T22:35:19,072 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2c6eb27d71c748e0b2b14744676d3edb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=33.3 K
2024-12-12T22:35:19,072 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84711 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T22:35:19,072 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files)
2024-12-12T22:35:19,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.
2024-12-12T22:35:19,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=82.7 K
2024-12-12T22:35:19,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.
2024-12-12T22:35:19,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03]
2024-12-12T22:35:19,074 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c6eb27d71c748e0b2b14744676d3edb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714
2024-12-12T22:35:19,075 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c60b137474e949039afbb03d4ca50254, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714
2024-12-12T22:35:19,075 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b252cba5404d44cfbc4204f6b5ebc4dd, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734042916857
2024-12-12T22:35:19,076 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc9d638c53ad45c2b9dab57ec52bf656, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734042916857
2024-12-12T22:35:19,076 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e50d5ab8c065424f84d64bfd3985bd03, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190
2024-12-12T22:35:19,077 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ffd4eeae60fd40609971c6fcae47f75f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190
2024-12-12T22:35:19,104 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513]
2024-12-12T22:35:19,111 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-12T22:35:19,112 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/a615823be8df4d9d80ae863068aff44f is 50, key is test_row_0/C:col10/1734042918519/Put/seqid=0
2024-12-12T22:35:19,124 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212f978b6389bcb4277878786acfbbf3cac_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513]
2024-12-12T22:35:19,126 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212f978b6389bcb4277878786acfbbf3cac_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513]
2024-12-12T22:35:19,188 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f978b6389bcb4277878786acfbbf3cac_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513]
2024-12-12T22:35:19,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741972_1148 (size=12493)
2024-12-12T22:35:19,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741973_1149 (size=4469)
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,283 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#120 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:19,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,284 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b10c79552e30464f90cbb0c65e870f24 is 175, key is test_row_0/A:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:19,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741974_1150 (size=31447) 
2024-12-12T22:35:19,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,356 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b10c79552e30464f90cbb0c65e870f24 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24 2024-12-12T22:35:19,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:19,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:35:19,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:19,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:19,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,388 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into b10c79552e30464f90cbb0c65e870f24(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:19,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:19,388 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=13, startTime=1734042919068; duration=0sec 2024-12-12T22:35:19,389 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:19,389 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:19,389 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:19,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,401 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:19,401 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:19,401 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:19,402 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c0ceb99cce1243fe86cb68f1f60e6376, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=33.3 K 2024-12-12T22:35:19,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,411 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0ceb99cce1243fe86cb68f1f60e6376, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1734042916714 2024-12-12T22:35:19,413 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2560892471a242539a0c78229387b759, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734042916857 2024-12-12T22:35:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,415 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 744c80bf3de443e19c566a89a72f29b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190 2024-12-12T22:35:19,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b6ed3aa83aa749fab5f447e71889d3a4_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042919340/Put/seqid=0 2024-12-12T22:35:19,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:35:19,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,440 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#123 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:19,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2f5abc5d70bb4b41892d341f4077a52a is 50, key is test_row_0/B:col10/1734042918519/Put/seqid=0 2024-12-12T22:35:19,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,454 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-12T22:35:19,455 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed 2024-12-12T22:35:19,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:19,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees 2024-12-12T22:35:19,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741975_1151 (size=24758) 2024-12-12T22:35:19,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042979476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:19,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:19,484 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:19,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:19,488 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042979478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042979481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042979482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042979480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:19,518 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b6ed3aa83aa749fab5f447e71889d3a4_8c758bf11da38bc1c9062a0f3e05f513 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6ed3aa83aa749fab5f447e71889d3a4_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:19,519 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/be97bd83afdd418a9c3c186e1950b084, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:19,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/be97bd83afdd418a9c3c186e1950b084 is 175, key is test_row_0/A:col10/1734042919340/Put/seqid=0 2024-12-12T22:35:19,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741976_1152 (size=74395) 2024-12-12T22:35:19,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741977_1153 (size=12493) 2024-12-12T22:35:19,579 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/2f5abc5d70bb4b41892d341f4077a52a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2f5abc5d70bb4b41892d341f4077a52a 2024-12-12T22:35:19,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:19,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042979588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042979592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042979592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042979592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042979592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,609 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into 2f5abc5d70bb4b41892d341f4077a52a(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:19,609 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:19,609 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=13, startTime=1734042919068; duration=0sec 2024-12-12T22:35:19,609 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:19,610 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:19,638 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:19,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:19,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:19,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:19,643 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:19,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,681 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/a615823be8df4d9d80ae863068aff44f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/a615823be8df4d9d80ae863068aff44f 2024-12-12T22:35:19,698 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into a615823be8df4d9d80ae863068aff44f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:19,698 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:19,698 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=13, startTime=1734042919068; duration=0sec 2024-12-12T22:35:19,698 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:19,698 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:19,724 INFO [master/1aef280cf0a8:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-12T22:35:19,724 INFO [master/1aef280cf0a8:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-12T22:35:19,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:19,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042979797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:19,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:19,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:19,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:19,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:19,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042979804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042979806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042979804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:19,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042979807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,927 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=179, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/be97bd83afdd418a9c3c186e1950b084 2024-12-12T22:35:19,945 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e44d39d5c35246c7afa89bb343f8f02b is 50, key is test_row_0/B:col10/1734042919340/Put/seqid=0 2024-12-12T22:35:19,967 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:19,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:19,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:19,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741978_1154 (size=12151) 2024-12-12T22:35:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:20,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042980109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042980112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042980116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,118 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042980117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042980116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,128 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:20,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,132 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,286 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:20,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:20,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e44d39d5c35246c7afa89bb343f8f02b 2024-12-12T22:35:20,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/f434003d072c422b81035413857e00dd is 50, key is test_row_0/C:col10/1734042919340/Put/seqid=0 2024-12-12T22:35:20,450 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:20,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:20,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741979_1155 (size=12151) 2024-12-12T22:35:20,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/f434003d072c422b81035413857e00dd 2024-12-12T22:35:20,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/be97bd83afdd418a9c3c186e1950b084 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084 2024-12-12T22:35:20,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084, entries=400, sequenceid=179, filesize=72.7 K 2024-12-12T22:35:20,571 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e44d39d5c35246c7afa89bb343f8f02b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b 2024-12-12T22:35:20,605 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:20,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:20,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:20,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b, entries=150, sequenceid=179, filesize=11.9 K 2024-12-12T22:35:20,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:20,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:20,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/f434003d072c422b81035413857e00dd as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd 2024-12-12T22:35:20,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042980627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042980633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042980635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042980635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:20,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042980635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd, entries=150, sequenceid=179, filesize=11.9 K 2024-12-12T22:35:20,691 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 8c758bf11da38bc1c9062a0f3e05f513 in 1321ms, sequenceid=179, compaction requested=false 2024-12-12T22:35:20,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:20,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:20,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-12T22:35:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:20,784 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:35:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:20,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:20,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:20,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:20,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121238236dd0289f4e8393c84b028e1fe7cd_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042919480/Put/seqid=0 2024-12-12T22:35:20,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741980_1156 (size=12304) 2024-12-12T22:35:20,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:20,867 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121238236dd0289f4e8393c84b028e1fe7cd_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121238236dd0289f4e8393c84b028e1fe7cd_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:20,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/03a07e2d4ade4e5198b1ef795cb64798, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:20,876 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/03a07e2d4ade4e5198b1ef795cb64798 is 175, key is test_row_0/A:col10/1734042919480/Put/seqid=0 2024-12-12T22:35:20,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741981_1157 (size=31105) 2024-12-12T22:35:21,325 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=208, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/03a07e2d4ade4e5198b1ef795cb64798 2024-12-12T22:35:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/adadfd324a044179b83044b80e678356 is 50, key is test_row_0/B:col10/1734042919480/Put/seqid=0 2024-12-12T22:35:21,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741982_1158 (size=12151) 2024-12-12T22:35:21,443 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/adadfd324a044179b83044b80e678356 2024-12-12T22:35:21,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/5415e838471044bd905f99426185df9f is 50, key is test_row_0/C:col10/1734042919480/Put/seqid=0 2024-12-12T22:35:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741983_1159 (size=12151) 2024-12-12T22:35:21,580 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/5415e838471044bd905f99426185df9f 2024-12-12T22:35:21,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/03a07e2d4ade4e5198b1ef795cb64798 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798 2024-12-12T22:35:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:21,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:21,665 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798, entries=150, sequenceid=208, filesize=30.4 K 2024-12-12T22:35:21,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/adadfd324a044179b83044b80e678356 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356 2024-12-12T22:35:21,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042981665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042981673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042981665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042981686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042981687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,719 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356, entries=150, sequenceid=208, filesize=11.9 K 2024-12-12T22:35:21,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/5415e838471044bd905f99426185df9f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f 2024-12-12T22:35:21,773 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f, entries=150, sequenceid=208, filesize=11.9 K 2024-12-12T22:35:21,775 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 8c758bf11da38bc1c9062a0f3e05f513 in 991ms, sequenceid=208, compaction requested=true 2024-12-12T22:35:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status 
journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:21,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-12T22:35:21,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-12T22:35:21,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:21,795 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:21,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:21,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-12T22:35:21,801 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3110 sec 2024-12-12T22:35:21,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 2.3240 sec 2024-12-12T22:35:21,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042981857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042981859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042981860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042981863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042981867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127ab9bee0810a48108d03be8875dc2420_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:21,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741984_1160 (size=17284) 2024-12-12T22:35:21,966 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:21,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042981975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042981975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042981975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042981975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:21,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:21,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042981975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,016 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127ab9bee0810a48108d03be8875dc2420_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127ab9bee0810a48108d03be8875dc2420_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:22,036 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c7ca9652853348ab9955b5ce4f555a29, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:22,036 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c7ca9652853348ab9955b5ce4f555a29 is 175, key is test_row_0/A:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:22,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741985_1161 (size=48389) 2024-12-12T22:35:22,123 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=221, memsize=22.4 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c7ca9652853348ab9955b5ce4f555a29 2024-12-12T22:35:22,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042982181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042982185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/30a9b0d70fcb47a6bdb2210a07e1f57d is 50, key is test_row_0/B:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:22,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042982199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042982199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042982205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741986_1162 (size=12151) 2024-12-12T22:35:22,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042982495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,511 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042982508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042982508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042982508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:22,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042982516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:22,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/30a9b0d70fcb47a6bdb2210a07e1f57d 2024-12-12T22:35:22,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e2e55636b064e5c80fc08f12fa4a8d1 is 50, key is test_row_0/C:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:22,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741987_1163 (size=12151) 2024-12-12T22:35:22,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e2e55636b064e5c80fc08f12fa4a8d1 2024-12-12T22:35:22,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/c7ca9652853348ab9955b5ce4f555a29 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29 2024-12-12T22:35:22,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29, entries=250, sequenceid=221, filesize=47.3 K 2024-12-12T22:35:22,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/30a9b0d70fcb47a6bdb2210a07e1f57d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d 2024-12-12T22:35:22,904 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d, entries=150, sequenceid=221, filesize=11.9 K 2024-12-12T22:35:22,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e2e55636b064e5c80fc08f12fa4a8d1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1 2024-12-12T22:35:22,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1, entries=150, sequenceid=221, filesize=11.9 K 2024-12-12T22:35:22,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8c758bf11da38bc1c9062a0f3e05f513 in 1147ms, sequenceid=221, compaction requested=true 2024-12-12T22:35:22,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:22,942 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:22,949 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 185336 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:22,949 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:22,950 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
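[editor's note] The repeated RegionTooBusyException warnings above are thrown by HRegion.checkResources when a region's memstore exceeds its blocking limit (the configured flush size multiplied by hbase.hregion.memstore.block.multiplier); the "Over memstore limit=512.0 K" figure is consistent with a test that deliberately shrinks the flush size so writers hit the limit while flushes and compactions are still running. A minimal client-side sketch, assuming a hypothetical writer against the TestAcidGuarantees table and only standard HBase client APIs, that backs off and retries when the region reports itself too busy (depending on client retry settings the exception may instead surface wrapped in a retries-exhausted exception):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put); // rejected while the region's memstore is over its blocking limit
              break;
            } catch (RegionTooBusyException e) {
              // The region is busy flushing; back off and try again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }

This is a sketch of the client-side handling only; the test itself keeps writing at full rate precisely to exercise this blocking path.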
2024-12-12T22:35:22,950 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=181.0 K 2024-12-12T22:35:22,950 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:22,950 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29] 2024-12-12T22:35:22,951 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b10c79552e30464f90cbb0c65e870f24, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190 2024-12-12T22:35:22,955 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting be97bd83afdd418a9c3c186e1950b084, keycount=400, bloomtype=ROW, size=72.7 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734042919337 2024-12-12T22:35:22,955 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03a07e2d4ade4e5198b1ef795cb64798, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734042919474 2024-12-12T22:35:22,956 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c7ca9652853348ab9955b5ce4f555a29, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921646 2024-12-12T22:35:22,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): 
Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:22,963 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:22,963 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:22,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:22,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:22,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:22,964 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:22,976 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:22,977 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:22,977 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
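[editor's note] The compaction lines above show ExploringCompactionPolicy selecting all four eligible store files of stores A and B for minor compaction once the latest flush pushes the file count past the minimum. As an illustrative sketch only (these are standard HBase configuration keys, not the actual settings this test run used), the knobs that drive the behaviour visible in this log could be set like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallRegionTuningSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Writers receive RegionTooBusyException once the memstore reaches
        // flush.size * block.multiplier -- 512 KB with these sketch values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // A store becomes eligible for minor compaction at this many files ...
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ... and updates are blocked once a store accumulates this many
        // (the "16 blocking" in the compaction-selection lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
      }
    }

With a deliberately small flush size like this, the write load of the ACID-guarantees test repeatedly trips the 512 K blocking limit between flushes, which is what produces the dense runs of RegionTooBusyException entries in this section.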
2024-12-12T22:35:22,977 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2f5abc5d70bb4b41892d341f4077a52a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=47.8 K 2024-12-12T22:35:22,982 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f5abc5d70bb4b41892d341f4077a52a, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190 2024-12-12T22:35:22,985 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e44d39d5c35246c7afa89bb343f8f02b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734042919340 2024-12-12T22:35:22,986 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting adadfd324a044179b83044b80e678356, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734042919474 2024-12-12T22:35:22,988 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 30a9b0d70fcb47a6bdb2210a07e1f57d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921675 2024-12-12T22:35:23,016 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,025 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#133 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:23,026 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/1e876edbbcc54b9988273a44c1d9fce6 is 50, key is test_row_0/B:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:23,032 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412124bf51de61e894640b19abbdf45024e47_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,035 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412124bf51de61e894640b19abbdf45024e47_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,036 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124bf51de61e894640b19abbdf45024e47_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:35:23,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:23,065 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:23,067 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741988_1164 (size=12629) 2024-12-12T22:35:23,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741989_1165 (size=4469) 2024-12-12T22:35:23,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042983097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042983098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,123 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042983104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042983108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042983116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129a51477b7ea3472a822deb6df34667e6_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042923048/Put/seqid=0 2024-12-12T22:35:23,152 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/1e876edbbcc54b9988273a44c1d9fce6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/1e876edbbcc54b9988273a44c1d9fce6 2024-12-12T22:35:23,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741990_1166 (size=12304) 2024-12-12T22:35:23,195 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:23,219 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129a51477b7ea3472a822deb6df34667e6_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a51477b7ea3472a822deb6df34667e6_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:23,228 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d051beb01da740d69e34793b4bb23167, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d051beb01da740d69e34793b4bb23167 is 175, key is test_row_0/A:col10/1734042923048/Put/seqid=0 
2024-12-12T22:35:23,230 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into 1e876edbbcc54b9988273a44c1d9fce6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:23,230 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:23,230 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=12, startTime=1734042922963; duration=0sec 2024-12-12T22:35:23,230 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:23,231 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:23,231 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:23,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042983216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,237 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:23,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,237 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:23,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042983232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,237 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:23,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042983220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042983236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,237 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/a615823be8df4d9d80ae863068aff44f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=47.8 K 2024-12-12T22:35:23,240 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a615823be8df4d9d80ae863068aff44f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042918190 2024-12-12T22:35:23,244 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f434003d072c422b81035413857e00dd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1734042919340 2024-12-12T22:35:23,246 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5415e838471044bd905f99426185df9f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1734042919474 2024-12-12T22:35:23,248 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042983240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,254 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e2e55636b064e5c80fc08f12fa4a8d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921675 2024-12-12T22:35:23,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741991_1167 (size=31105) 2024-12-12T22:35:23,270 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=247, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d051beb01da740d69e34793b4bb23167 2024-12-12T22:35:23,311 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:23,312 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/3b1039f8aff14f70a261206641e84c73 is 50, key is test_row_0/C:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:23,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/70bb004c773d472d864b4fb150efcc90 is 50, key is test_row_0/B:col10/1734042923048/Put/seqid=0 2024-12-12T22:35:23,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741992_1168 (size=12629) 2024-12-12T22:35:23,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741993_1169 (size=12151) 2024-12-12T22:35:23,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042983438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,447 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/70bb004c773d472d864b4fb150efcc90 2024-12-12T22:35:23,461 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042983441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042983442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042983448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,472 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/3b1039f8aff14f70a261206641e84c73 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/3b1039f8aff14f70a261206641e84c73 2024-12-12T22:35:23,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042983468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e8e7a5876994af68772b5d7f0bb6681 is 50, key is test_row_0/C:col10/1734042923048/Put/seqid=0 2024-12-12T22:35:23,503 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#132 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:23,504 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e3b47ace7679453abe6de4f98a896afb is 175, key is test_row_0/A:col10/1734042921792/Put/seqid=0 2024-12-12T22:35:23,513 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into 3b1039f8aff14f70a261206641e84c73(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:23,513 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:23,513 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=12, startTime=1734042922964; duration=0sec 2024-12-12T22:35:23,513 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:23,513 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:23,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741994_1170 (size=12151) 2024-12-12T22:35:23,559 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=247 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e8e7a5876994af68772b5d7f0bb6681 2024-12-12T22:35:23,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741995_1171 (size=31583) 2024-12-12T22:35:23,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d051beb01da740d69e34793b4bb23167 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167 2024-12-12T22:35:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-12T22:35:23,608 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-12T22:35:23,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:23,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167, entries=150, sequenceid=247, filesize=30.4 K 2024-12-12T22:35:23,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-12T22:35:23,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:23,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/70bb004c773d472d864b4fb150efcc90 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90 2024-12-12T22:35:23,620 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:23,623 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:23,624 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:23,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90, entries=150, sequenceid=247, filesize=11.9 K 2024-12-12T22:35:23,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/9e8e7a5876994af68772b5d7f0bb6681 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681 2024-12-12T22:35:23,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681, entries=150, sequenceid=247, filesize=11.9 K 2024-12-12T22:35:23,644 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 8c758bf11da38bc1c9062a0f3e05f513 in 584ms, sequenceid=247, compaction requested=false 2024-12-12T22:35:23,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:23,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:23,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:23,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:23,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:23,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:23,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:23,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:23,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:23,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:23,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:23,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:23,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121262e07f14e10646afa526ea7f1b9c7284_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:23,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741996_1172 (size=14894) 2024-12-12T22:35:23,844 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:23,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042983852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042983853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042983854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,865 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042983861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042983868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,882 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121262e07f14e10646afa526ea7f1b9c7284_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121262e07f14e10646afa526ea7f1b9c7284_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:23,885 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/57a97a0748174b4ebb6d9abe40b582bb, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:23,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/57a97a0748174b4ebb6d9abe40b582bb is 175, key is test_row_0/A:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:23,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:23,943 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,944 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:23,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:23,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:23,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:23,946 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:23,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:23,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:23,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741997_1173 (size=39849) 2024-12-12T22:35:23,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042983963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042983970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,979 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e3b47ace7679453abe6de4f98a896afb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb 2024-12-12T22:35:23,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042983977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042983977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:23,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042983977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,019 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into e3b47ace7679453abe6de4f98a896afb(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-12T22:35:24,020 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:24,020 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=12, startTime=1734042922942; duration=1sec 2024-12-12T22:35:24,020 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:24,020 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:24,109 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,112 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042984183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042984184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042984184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042984184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042984195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:24,269 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,358 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=261, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/57a97a0748174b4ebb6d9abe40b582bb 2024-12-12T22:35:24,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/73df4a9ca825404e9acb474bb4862297 is 50, key is test_row_0/B:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:24,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741998_1174 (size=12251) 2024-12-12T22:35:24,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,438 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042984489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042984489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042984496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,501 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042984501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042984502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:24,771 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/73df4a9ca825404e9acb474bb4862297 2024-12-12T22:35:24,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/491a9de2c35f4e1cb4d14526ad695e9a is 50, key is test_row_0/C:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:24,938 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:24,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:24,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:24,939 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:24,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:24,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741999_1175 (size=12251) 2024-12-12T22:35:24,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042984996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:24,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:24,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042984996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:25,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042985003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:25,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042985016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:25,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042985019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:25,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:25,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:25,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,267 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:25,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:25,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,279 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:25,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/491a9de2c35f4e1cb4d14526ad695e9a 2024-12-12T22:35:25,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/57a97a0748174b4ebb6d9abe40b582bb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb 2024-12-12T22:35:25,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb, entries=200, sequenceid=261, filesize=38.9 K 2024-12-12T22:35:25,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/73df4a9ca825404e9acb474bb4862297 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297 2024-12-12T22:35:25,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297, entries=150, sequenceid=261, filesize=12.0 K 2024-12-12T22:35:25,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/491a9de2c35f4e1cb4d14526ad695e9a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a 2024-12-12T22:35:25,435 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:25,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:25,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:25,445 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a, entries=150, sequenceid=261, filesize=12.0 K 2024-12-12T22:35:25,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 8c758bf11da38bc1c9062a0f3e05f513 in 1690ms, sequenceid=261, compaction requested=true 2024-12-12T22:35:25,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:25,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:25,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:25,450 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:25,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:25,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:25,450 DEBUG 
[RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:25,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:25,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:25,456 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:25,456 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37031 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:25,456 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:25,456 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:25,456 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,456 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=100.1 K 2024-12-12T22:35:25,456 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,456 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb] 2024-12-12T22:35:25,458 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:25,458 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3b47ace7679453abe6de4f98a896afb, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921675 2024-12-12T22:35:25,458 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/1e876edbbcc54b9988273a44c1d9fce6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.2 K 2024-12-12T22:35:25,458 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d051beb01da740d69e34793b4bb23167, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734042921860 2024-12-12T22:35:25,458 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e876edbbcc54b9988273a44c1d9fce6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921675 2024-12-12T22:35:25,463 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 70bb004c773d472d864b4fb150efcc90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734042921860 2024-12-12T22:35:25,463 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57a97a0748174b4ebb6d9abe40b582bb, keycount=200, bloomtype=ROW, size=38.9 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:25,466 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 73df4a9ca825404e9acb474bb4862297, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:25,494 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:25,494 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e04d2d1be8e8482dae5070797af42324 is 50, key is test_row_0/B:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:25,500 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:25,517 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212269a9abf272540238aba95cb105869df_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:25,518 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212269a9abf272540238aba95cb105869df_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:25,519 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212269a9abf272540238aba95cb105869df_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:25,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742000_1176 (size=12831) 2024-12-12T22:35:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742001_1177 (size=4469) 2024-12-12T22:35:25,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:25,594 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-12T22:35:25,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:25,594 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:25,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:25,603 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/e04d2d1be8e8482dae5070797af42324 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e04d2d1be8e8482dae5070797af42324 2024-12-12T22:35:25,633 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into e04d2d1be8e8482dae5070797af42324(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:25,634 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:25,634 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=13, startTime=1734042925450; duration=0sec 2024-12-12T22:35:25,634 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:25,634 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:25,634 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:25,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212da9579ecc45a4daa881f8addde51ce37_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042923855/Put/seqid=0 2024-12-12T22:35:25,643 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37031 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:25,643 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:25,643 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:25,643 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/3b1039f8aff14f70a261206641e84c73, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.2 K 2024-12-12T22:35:25,656 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b1039f8aff14f70a261206641e84c73, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1734042921675 2024-12-12T22:35:25,664 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e8e7a5876994af68772b5d7f0bb6681, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=247, earliestPutTs=1734042921860 2024-12-12T22:35:25,668 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 491a9de2c35f4e1cb4d14526ad695e9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:25,768 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#144 average throughput is 0.12 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:25,769 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/62a7d24294724a02b194b4b99da37737 is 50, key is test_row_0/C:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:25,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:25,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742002_1178 (size=12454) 2024-12-12T22:35:25,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742003_1179 (size=12831) 2024-12-12T22:35:25,891 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/62a7d24294724a02b194b4b99da37737 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/62a7d24294724a02b194b4b99da37737 2024-12-12T22:35:25,948 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into 62a7d24294724a02b194b4b99da37737(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:25,948 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:25,948 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=13, startTime=1734042925450; duration=0sec 2024-12-12T22:35:25,948 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:25,948 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:25,988 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#142 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:25,989 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4236f725bca24d5181ee7e40b324763c is 175, key is test_row_0/A:col10/1734042923102/Put/seqid=0 2024-12-12T22:35:26,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:26,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742004_1180 (size=31785) 2024-12-12T22:35:26,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042986036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042986039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042986046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042986050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042986051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042986153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042986156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042986163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042986164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,169 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042986168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:26,191 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212da9579ecc45a4daa881f8addde51ce37_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212da9579ecc45a4daa881f8addde51ce37_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:26,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e491f4d2ef29440fa156f31f67f74fb4, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:26,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e491f4d2ef29440fa156f31f67f74fb4 is 175, key is test_row_0/A:col10/1734042923855/Put/seqid=0 2024-12-12T22:35:26,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742005_1181 (size=31255) 2024-12-12T22:35:26,255 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e491f4d2ef29440fa156f31f67f74fb4 2024-12-12T22:35:26,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/712c8dbf26fe4773b2d4713f30b28df4 is 50, key is test_row_0/B:col10/1734042923855/Put/seqid=0 2024-12-12T22:35:26,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742006_1182 (size=12301) 2024-12-12T22:35:26,316 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/712c8dbf26fe4773b2d4713f30b28df4 2024-12-12T22:35:26,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/e14a13e929e04ccf9373878b92b83578 is 50, key is test_row_0/C:col10/1734042923855/Put/seqid=0 2024-12-12T22:35:26,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042986358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042986359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742007_1183 (size=12301) 2024-12-12T22:35:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042986368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,375 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/e14a13e929e04ccf9373878b92b83578 2024-12-12T22:35:26,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042986380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042986387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e491f4d2ef29440fa156f31f67f74fb4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4 2024-12-12T22:35:26,403 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4, entries=150, sequenceid=287, filesize=30.5 K 2024-12-12T22:35:26,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/712c8dbf26fe4773b2d4713f30b28df4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4 2024-12-12T22:35:26,423 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4, 
entries=150, sequenceid=287, filesize=12.0 K 2024-12-12T22:35:26,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/e14a13e929e04ccf9373878b92b83578 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578 2024-12-12T22:35:26,435 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578, entries=150, sequenceid=287, filesize=12.0 K 2024-12-12T22:35:26,440 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 8c758bf11da38bc1c9062a0f3e05f513 in 846ms, sequenceid=287, compaction requested=false 2024-12-12T22:35:26,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:26,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:26,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-12T22:35:26,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-12T22:35:26,446 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/4236f725bca24d5181ee7e40b324763c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c 2024-12-12T22:35:26,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-12T22:35:26,448 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8210 sec 2024-12-12T22:35:26,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 2.8370 sec 2024-12-12T22:35:26,460 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into 4236f725bca24d5181ee7e40b324763c(size=31.0 K), total size for store is 61.6 K. 
This selection was in queue for 0sec, and took 1sec to execute. 2024-12-12T22:35:26,460 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:26,460 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=13, startTime=1734042925450; duration=1sec 2024-12-12T22:35:26,461 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:26,461 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:26,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:26,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e11529ff06dd462a8a38557fc10ea607_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:26,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042986734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042986734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042986734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042986735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042986739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742008_1184 (size=12454) 2024-12-12T22:35:26,753 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:26,762 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212e11529ff06dd462a8a38557fc10ea607_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e11529ff06dd462a8a38557fc10ea607_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:26,764 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d034ecf3b1ff45b8b8791dcd51d10e7a, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:26,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d034ecf3b1ff45b8b8791dcd51d10e7a is 175, key is test_row_0/A:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:26,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 
is added to blk_1073742009_1185 (size=31255) 2024-12-12T22:35:26,792 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=304, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d034ecf3b1ff45b8b8791dcd51d10e7a 2024-12-12T22:35:26,805 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c8e89e048ec3461797853769129af4f5 is 50, key is test_row_0/B:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742010_1186 (size=12301) 2024-12-12T22:35:26,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c8e89e048ec3461797853769129af4f5 2024-12-12T22:35:26,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042986838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042986838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042986839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042986839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042986846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:26,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/94f434226c8448518e5844dfc436c04f is 50, key is test_row_0/C:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:26,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742011_1187 (size=12301) 2024-12-12T22:35:27,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042987048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042987048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042987048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042987048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042987051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/94f434226c8448518e5844dfc436c04f 2024-12-12T22:35:27,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/d034ecf3b1ff45b8b8791dcd51d10e7a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a 2024-12-12T22:35:27,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042987350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042987350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,354 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042987351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042987351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042987358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a, entries=150, sequenceid=304, filesize=30.5 K 2024-12-12T22:35:27,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c8e89e048ec3461797853769129af4f5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5 2024-12-12T22:35:27,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T22:35:27,417 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/94f434226c8448518e5844dfc436c04f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f 2024-12-12T22:35:27,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T22:35:27,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 8c758bf11da38bc1c9062a0f3e05f513 in 808ms, sequenceid=304, compaction requested=true 2024-12-12T22:35:27,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:27,475 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:27,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:35:27,475 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:27,488 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:27,488 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:27,492 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:27,493 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e04d2d1be8e8482dae5070797af42324, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.6 K 2024-12-12T22:35:27,488 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94295 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:27,493 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:27,493 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:27,494 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=92.1 K 2024-12-12T22:35:27,494 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:27,494 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a] 2024-12-12T22:35:27,499 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4236f725bca24d5181ee7e40b324763c, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:27,499 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e04d2d1be8e8482dae5070797af42324, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:27,503 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 712c8dbf26fe4773b2d4713f30b28df4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734042923850 2024-12-12T22:35:27,503 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e491f4d2ef29440fa156f31f67f74fb4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734042923850 2024-12-12T22:35:27,504 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c8e89e048ec3461797853769129af4f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:27,512 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d034ecf3b1ff45b8b8791dcd51d10e7a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:27,569 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:27,581 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#151 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:27,582 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/f124ff601ccf4bc18dd5612d50ab4ac3 is 50, key is test_row_0/B:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:27,586 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412129e819a9391a54739a1bd8ae670833d50_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:27,599 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412129e819a9391a54739a1bd8ae670833d50_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:27,600 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129e819a9391a54739a1bd8ae670833d50_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:27,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742012_1188 (size=12983) 2024-12-12T22:35:27,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742013_1189 (size=4469) 2024-12-12T22:35:27,707 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#150 average throughput is 0.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:27,708 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/043047ce10184b6d960e8fbc3eee313b is 175, key is test_row_0/A:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:27,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742014_1190 (size=31937) 2024-12-12T22:35:27,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-12T22:35:27,792 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-12T22:35:27,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:27,815 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/043047ce10184b6d960e8fbc3eee313b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b 2024-12-12T22:35:27,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-12T22:35:27,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:27,843 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:27,843 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:27,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:27,847 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into 043047ce10184b6d960e8fbc3eee313b(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:27,847 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:27,847 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=13, startTime=1734042927474; duration=0sec 2024-12-12T22:35:27,848 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:27,848 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:27,848 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:27,849 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37433 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:27,849 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:27,849 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:27,850 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/62a7d24294724a02b194b4b99da37737, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.6 K 2024-12-12T22:35:27,851 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62a7d24294724a02b194b4b99da37737, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1734042923091 2024-12-12T22:35:27,854 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e14a13e929e04ccf9373878b92b83578, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1734042923850 2024-12-12T22:35:27,855 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94f434226c8448518e5844dfc436c04f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:27,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 
3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T22:35:27,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:27,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:27,888 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#152 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:27,889 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/b344e96c5c54415dad09e02121c67bef is 50, key is test_row_0/C:col10/1734042926664/Put/seqid=0 2024-12-12T22:35:27,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126ad9ad4bade74876b88f3e3e44785735_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042927877/Put/seqid=0 2024-12-12T22:35:27,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042987908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042987920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042987920, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042987921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042987921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:27,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:27,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742015_1191 (size=12983) 2024-12-12T22:35:27,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742016_1192 (size=12454) 2024-12-12T22:35:28,005 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:28,016 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042988025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042988028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042988028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042988035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042988039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,090 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/f124ff601ccf4bc18dd5612d50ab4ac3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f124ff601ccf4bc18dd5612d50ab4ac3 2024-12-12T22:35:28,115 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into f124ff601ccf4bc18dd5612d50ab4ac3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:28,115 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:28,116 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=13, startTime=1734042927475; duration=0sec 2024-12-12T22:35:28,116 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:28,116 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:28,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:28,175 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,181 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:28,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042988231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042988237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042988237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042988249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042988252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,356 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,358 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:28,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,395 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:28,395 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/b344e96c5c54415dad09e02121c67bef as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b344e96c5c54415dad09e02121c67bef 2024-12-12T22:35:28,402 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126ad9ad4bade74876b88f3e3e44785735_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ad9ad4bade74876b88f3e3e44785735_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:28,419 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/a96aeeae2ae74b2ab93c2aaa2ca89324, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:28,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/a96aeeae2ae74b2ab93c2aaa2ca89324 is 175, key is test_row_0/A:col10/1734042927877/Put/seqid=0 2024-12-12T22:35:28,440 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into b344e96c5c54415dad09e02121c67bef(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:28,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:28,440 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=13, startTime=1734042927475; duration=0sec 2024-12-12T22:35:28,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:28,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:28,440 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:28,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742017_1193 (size=31255) 2024-12-12T22:35:28,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:28,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042988547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042988549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042988563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042988563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:28,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042988571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:28,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,839 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:28,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:28,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:28,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:28,841 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:28,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:28,884 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=329, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/a96aeeae2ae74b2ab93c2aaa2ca89324 2024-12-12T22:35:28,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/f73066a901ca4191b334ab6a7f26fba4 is 50, key is test_row_0/B:col10/1734042927877/Put/seqid=0 2024-12-12T22:35:28,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:28,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742018_1194 (size=12301) 2024-12-12T22:35:29,011 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,013 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:29,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042989065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:29,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042989067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:29,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042989072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042989074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:29,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042989083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,181 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,182 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/f73066a901ca4191b334ab6a7f26fba4 2024-12-12T22:35:29,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/8656fe7bec054591ab602f53a8ad78c9 is 50, key is test_row_0/C:col10/1734042927877/Put/seqid=0 2024-12-12T22:35:29,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742019_1195 (size=12301) 2024-12-12T22:35:29,504 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,667 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,827 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:29,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,828 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:29,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/8656fe7bec054591ab602f53a8ad78c9 2024-12-12T22:35:29,924 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/a96aeeae2ae74b2ab93c2aaa2ca89324 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324 2024-12-12T22:35:29,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:29,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324, entries=150, sequenceid=329, filesize=30.5 K 2024-12-12T22:35:29,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/f73066a901ca4191b334ab6a7f26fba4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4 2024-12-12T22:35:29,990 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:29,992 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:29,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:29,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:29,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:29,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4, entries=150, sequenceid=329, filesize=12.0 K 2024-12-12T22:35:30,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/8656fe7bec054591ab602f53a8ad78c9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9 2024-12-12T22:35:30,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9, entries=150, sequenceid=329, filesize=12.0 K 2024-12-12T22:35:30,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 8c758bf11da38bc1c9062a0f3e05f513 in 2195ms, sequenceid=329, compaction requested=false 2024-12-12T22:35:30,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:30,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T22:35:30,094 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:30,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:30,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:30,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:30,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:30,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:30,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121239f50622393140748c45291bd8230404_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:30,147 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:30,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042990143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042990147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042990160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042990164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042990175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742020_1196 (size=12454) 2024-12-12T22:35:30,187 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:30,243 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121239f50622393140748c45291bd8230404_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f50622393140748c45291bd8230404_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:30,263 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b61ba86927ac411aa1a819c033e5f3a1, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:30,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b61ba86927ac411aa1a819c033e5f3a1 is 175, key is test_row_0/A:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:30,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042990273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042990274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042990275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042990278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042990292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742021_1197 (size=31255) 2024-12-12T22:35:30,309 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=347, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b61ba86927ac411aa1a819c033e5f3a1 2024-12-12T22:35:30,311 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:30,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:30,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/775b8b5090464d44af2c0c9c56e5b7ca is 50, key is test_row_0/B:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:30,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742022_1198 (size=12301) 2024-12-12T22:35:30,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/775b8b5090464d44af2c0c9c56e5b7ca 2024-12-12T22:35:30,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
as already flushing 2024-12-12T22:35:30,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:30,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d8345c2e915b44d8b468ca87af9e51bd is 50, key is test_row_0/C:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:30,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042990482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,490 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042990483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042990483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042990495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042990504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742023_1199 (size=12301) 2024-12-12T22:35:30,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,647 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:30,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:30,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41226 deadline: 1734042990796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41268 deadline: 1734042990802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41252 deadline: 1734042990796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,810 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41282 deadline: 1734042990809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:30,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:30,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41234 deadline: 1734042990812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d8345c2e915b44d8b468ca87af9e51bd 2024-12-12T22:35:30,975 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:30,976 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:30,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:30,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:30,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:30,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:31,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/b61ba86927ac411aa1a819c033e5f3a1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1 2024-12-12T22:35:31,076 DEBUG [Thread-569 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7af61386 to 127.0.0.1:50645 2024-12-12T22:35:31,076 DEBUG [Thread-569 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,080 DEBUG [Thread-565 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:50645 2024-12-12T22:35:31,081 DEBUG [Thread-565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,081 DEBUG [Thread-563 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x152377d4 to 127.0.0.1:50645 2024-12-12T22:35:31,081 DEBUG [Thread-563 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,083 DEBUG [Thread-567 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:50645 2024-12-12T22:35:31,083 DEBUG [Thread-567 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,106 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1, entries=150, sequenceid=347, filesize=30.5 K 2024-12-12T22:35:31,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/775b8b5090464d44af2c0c9c56e5b7ca as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca 2024-12-12T22:35:31,131 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:31,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:31,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:31,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:31,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:31,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:31,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:31,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:31,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca, entries=150, sequenceid=347, filesize=12.0 K 2024-12-12T22:35:31,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/d8345c2e915b44d8b468ca87af9e51bd as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd 2024-12-12T22:35:31,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd, entries=150, sequenceid=347, filesize=12.0 K 2024-12-12T22:35:31,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=127.47 KB/130530 for 8c758bf11da38bc1c9062a0f3e05f513 in 1085ms, sequenceid=347, compaction requested=true 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:31,180 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c758bf11da38bc1c9062a0f3e05f513:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:31,180 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:31,180 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:31,184 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94447 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:31,184 DEBUG 
[RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/A is initiating minor compaction (all files) 2024-12-12T22:35:31,184 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/A in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:31,185 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=92.2 K 2024-12-12T22:35:31,185 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:31,185 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1] 2024-12-12T22:35:31,186 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:31,186 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 043047ce10184b6d960e8fbc3eee313b, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:31,186 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/B is initiating minor compaction (all files) 2024-12-12T22:35:31,186 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/B in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:31,186 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f124ff601ccf4bc18dd5612d50ab4ac3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.7 K 2024-12-12T22:35:31,186 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting a96aeeae2ae74b2ab93c2aaa2ca89324, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734042926733 2024-12-12T22:35:31,186 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f124ff601ccf4bc18dd5612d50ab4ac3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:31,190 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f73066a901ca4191b334ab6a7f26fba4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734042926733 2024-12-12T22:35:31,190 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b61ba86927ac411aa1a819c033e5f3a1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1734042927905 2024-12-12T22:35:31,191 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 775b8b5090464d44af2c0c9c56e5b7ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1734042927905 2024-12-12T22:35:31,229 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#B#compaction#159 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:31,230 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/13d058a7f05b47a0817698a642bb48f6 is 50, key is test_row_0/B:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:31,231 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T22:35:31,252 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:31,264 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212906ad6e49b67408bb7a972b70b22317e_8c758bf11da38bc1c9062a0f3e05f513 store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:31,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742024_1200 (size=13085) 2024-12-12T22:35:31,296 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:31,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-12T22:35:31,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:31,299 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:35:31,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:31,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:31,315 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/13d058a7f05b47a0817698a642bb48f6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/13d058a7f05b47a0817698a642bb48f6 2024-12-12T22:35:31,318 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212906ad6e49b67408bb7a972b70b22317e_8c758bf11da38bc1c9062a0f3e05f513, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:31,318 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212906ad6e49b67408bb7a972b70b22317e_8c758bf11da38bc1c9062a0f3e05f513 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:31,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:31,319 DEBUG [Thread-556 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a9306be to 127.0.0.1:50645 2024-12-12T22:35:31,319 DEBUG [Thread-556 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,322 DEBUG [Thread-558 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x769942d9 to 127.0.0.1:50645 2024-12-12T22:35:31,322 DEBUG [Thread-558 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,323 DEBUG [Thread-550 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4c7d6279 to 127.0.0.1:50645 2024-12-12T22:35:31,323 DEBUG [Thread-550 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. as already flushing 2024-12-12T22:35:31,333 DEBUG [Thread-560 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x00cb464a to 127.0.0.1:50645 2024-12-12T22:35:31,333 DEBUG [Thread-560 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,335 DEBUG [Thread-554 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b55744e to 127.0.0.1:50645 2024-12-12T22:35:31,335 DEBUG [Thread-554 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:31,359 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/B of 8c758bf11da38bc1c9062a0f3e05f513 into 13d058a7f05b47a0817698a642bb48f6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:31,359 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:31,360 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/B, priority=13, startTime=1734042931180; duration=0sec 2024-12-12T22:35:31,360 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:31,360 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:B 2024-12-12T22:35:31,360 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:31,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126b1a342661314e7e89e0fb0a30c3c61f_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042930152/Put/seqid=0 2024-12-12T22:35:31,373 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:31,373 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 8c758bf11da38bc1c9062a0f3e05f513/C is initiating minor compaction (all files) 2024-12-12T22:35:31,373 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8c758bf11da38bc1c9062a0f3e05f513/C in TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:31,373 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b344e96c5c54415dad09e02121c67bef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp, totalSize=36.7 K 2024-12-12T22:35:31,376 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b344e96c5c54415dad09e02121c67bef, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734042926037 2024-12-12T22:35:31,383 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 8656fe7bec054591ab602f53a8ad78c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734042926733 2024-12-12T22:35:31,388 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d8345c2e915b44d8b468ca87af9e51bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1734042927905 2024-12-12T22:35:31,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742026_1202 (size=12454) 2024-12-12T22:35:31,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742025_1201 (size=4469) 2024-12-12T22:35:31,479 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#C#compaction#162 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:31,480 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2cfbbd63b2cb4285bf8a84cab76ec229 is 50, key is test_row_0/C:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:31,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742027_1203 (size=13085) 2024-12-12T22:35:31,573 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/2cfbbd63b2cb4285bf8a84cab76ec229 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfbbd63b2cb4285bf8a84cab76ec229 2024-12-12T22:35:31,592 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/C of 8c758bf11da38bc1c9062a0f3e05f513 into 2cfbbd63b2cb4285bf8a84cab76ec229(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:31,592 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:31,592 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/C, priority=13, startTime=1734042931180; duration=0sec 2024-12-12T22:35:31,592 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:31,592 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:C 2024-12-12T22:35:31,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:31,843 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c758bf11da38bc1c9062a0f3e05f513#A#compaction#160 average throughput is 0.04 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:31,844 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e045856fb8a542a2adcecbe2b2b9a846 is 175, key is test_row_0/A:col10/1734042930083/Put/seqid=0 2024-12-12T22:35:31,861 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126b1a342661314e7e89e0fb0a30c3c61f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b1a342661314e7e89e0fb0a30c3c61f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:31,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/38d40a15a7304a51983ae6cf2e5c76ad, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:31,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/38d40a15a7304a51983ae6cf2e5c76ad is 175, key is test_row_0/A:col10/1734042930152/Put/seqid=0 2024-12-12T22:35:31,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742028_1204 (size=32039) 2024-12-12T22:35:31,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742029_1205 (size=31255) 2024-12-12T22:35:31,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:32,298 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/e045856fb8a542a2adcecbe2b2b9a846 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e045856fb8a542a2adcecbe2b2b9a846 2024-12-12T22:35:32,306 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8c758bf11da38bc1c9062a0f3e05f513/A of 8c758bf11da38bc1c9062a0f3e05f513 into e045856fb8a542a2adcecbe2b2b9a846(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 1sec to execute. 
2024-12-12T22:35:32,306 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:32,306 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513., storeName=8c758bf11da38bc1c9062a0f3e05f513/A, priority=13, startTime=1734042931180; duration=1sec 2024-12-12T22:35:32,306 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:32,306 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c758bf11da38bc1c9062a0f3e05f513:A 2024-12-12T22:35:32,320 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/38d40a15a7304a51983ae6cf2e5c76ad 2024-12-12T22:35:32,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c182186ac90f496c8ea9d256e7ed51d0 is 50, key is test_row_0/B:col10/1734042930152/Put/seqid=0 2024-12-12T22:35:32,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742030_1206 (size=12301) 2024-12-12T22:35:32,777 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c182186ac90f496c8ea9d256e7ed51d0 2024-12-12T22:35:32,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/bb844aa5156940b4923251cd179b0d30 is 50, key is test_row_0/C:col10/1734042930152/Put/seqid=0 2024-12-12T22:35:32,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742031_1207 (size=12301) 2024-12-12T22:35:33,221 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/bb844aa5156940b4923251cd179b0d30 2024-12-12T22:35:33,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/38d40a15a7304a51983ae6cf2e5c76ad as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/38d40a15a7304a51983ae6cf2e5c76ad 2024-12-12T22:35:33,234 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/38d40a15a7304a51983ae6cf2e5c76ad, entries=150, sequenceid=369, filesize=30.5 K 2024-12-12T22:35:33,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/c182186ac90f496c8ea9d256e7ed51d0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c182186ac90f496c8ea9d256e7ed51d0 2024-12-12T22:35:33,242 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c182186ac90f496c8ea9d256e7ed51d0, entries=150, sequenceid=369, filesize=12.0 K 2024-12-12T22:35:33,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/bb844aa5156940b4923251cd179b0d30 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/bb844aa5156940b4923251cd179b0d30 2024-12-12T22:35:33,258 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/bb844aa5156940b4923251cd179b0d30, entries=150, sequenceid=369, filesize=12.0 K 2024-12-12T22:35:33,260 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=33.54 KB/34350 for 8c758bf11da38bc1c9062a0f3e05f513 in 1961ms, sequenceid=369, compaction requested=false 2024-12-12T22:35:33,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:33,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:33,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-12T22:35:33,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-12T22:35:33,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-12T22:35:33,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 5.4270 sec 2024-12-12T22:35:33,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 5.4620 sec 2024-12-12T22:35:35,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-12T22:35:35,952 INFO [Thread-562 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2518 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 2350 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1050 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3150 rows 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1078 2024-12-12T22:35:35,953 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3234 rows 2024-12-12T22:35:35,953 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T22:35:35,953 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a378df6 to 127.0.0.1:50645 2024-12-12T22:35:35,953 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:35:35,957 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T22:35:35,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T22:35:35,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:35,966 DEBUG 
[PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042935965"}]},"ts":"1734042935965"} 2024-12-12T22:35:35,970 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T22:35:35,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:36,004 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:35:36,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:35:36,010 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=49, ppid=48, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, UNASSIGN}] 2024-12-12T22:35:36,012 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=49, ppid=48, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, UNASSIGN 2024-12-12T22:35:36,013 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=49 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:36,015 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:35:36,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; CloseRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:36,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:36,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:36,168 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] handler.UnassignRegionHandler(124): Close 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:36,168 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1681): Closing 8c758bf11da38bc1c9062a0f3e05f513, disabling compactions & flushes 2024-12-12T22:35:36,169 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 
2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. after waiting 0 ms 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:36,169 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(2837): Flushing 8c758bf11da38bc1c9062a0f3e05f513 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=A 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=B 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8c758bf11da38bc1c9062a0f3e05f513, store=C 2024-12-12T22:35:36,169 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:36,202 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212316e2a08c669422a8394b16f1d86604f_8c758bf11da38bc1c9062a0f3e05f513 is 50, key is test_row_0/A:col10/1734042931320/Put/seqid=0 2024-12-12T22:35:36,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742032_1208 (size=12454) 2024-12-12T22:35:36,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:36,651 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:36,663 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212316e2a08c669422a8394b16f1d86604f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212316e2a08c669422a8394b16f1d86604f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:36,665 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/0a04638bcadc4e989ad9cfb10e6f84e8, store: [table=TestAcidGuarantees family=A region=8c758bf11da38bc1c9062a0f3e05f513] 2024-12-12T22:35:36,665 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/0a04638bcadc4e989ad9cfb10e6f84e8 is 175, key is test_row_0/A:col10/1734042931320/Put/seqid=0 2024-12-12T22:35:36,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742033_1209 (size=31255) 2024-12-12T22:35:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:37,093 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=380, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/0a04638bcadc4e989ad9cfb10e6f84e8 2024-12-12T22:35:37,184 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/25a510d9d5694d26bf54a63b245dec76 is 50, key is test_row_0/B:col10/1734042931320/Put/seqid=0 2024-12-12T22:35:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742034_1210 (size=12301) 2024-12-12T22:35:37,343 DEBUG [master/1aef280cf0a8:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 20f68c42b55d0d7b4a49ed486e40f5a4 changed from -1.0 to 0.0, refreshing cache 2024-12-12T22:35:37,621 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/25a510d9d5694d26bf54a63b245dec76 2024-12-12T22:35:37,670 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/708cdd2e2b90480194f961b1c95da475 is 50, key is test_row_0/C:col10/1734042931320/Put/seqid=0 2024-12-12T22:35:37,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742035_1211 (size=12301) 2024-12-12T22:35:38,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:38,144 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=380 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/708cdd2e2b90480194f961b1c95da475 2024-12-12T22:35:38,171 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/A/0a04638bcadc4e989ad9cfb10e6f84e8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/0a04638bcadc4e989ad9cfb10e6f84e8 2024-12-12T22:35:38,183 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/0a04638bcadc4e989ad9cfb10e6f84e8, entries=150, sequenceid=380, filesize=30.5 K 2024-12-12T22:35:38,189 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/B/25a510d9d5694d26bf54a63b245dec76 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/25a510d9d5694d26bf54a63b245dec76 2024-12-12T22:35:38,206 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/25a510d9d5694d26bf54a63b245dec76, entries=150, sequenceid=380, filesize=12.0 K 2024-12-12T22:35:38,210 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/.tmp/C/708cdd2e2b90480194f961b1c95da475 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/708cdd2e2b90480194f961b1c95da475 2024-12-12T22:35:38,225 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/708cdd2e2b90480194f961b1c95da475, entries=150, sequenceid=380, filesize=12.0 K 2024-12-12T22:35:38,235 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 8c758bf11da38bc1c9062a0f3e05f513 in 2066ms, sequenceid=380, compaction requested=true 2024-12-12T22:35:38,240 DEBUG [StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1] to archive 2024-12-12T22:35:38,245 DEBUG [StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:35:38,258 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/532f50d51a12459abe878667a5144a7c 2024-12-12T22:35:38,258 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/61fa2c92053644d8b567441b4c45f177 2024-12-12T22:35:38,258 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/1e073f3f2d434381a955b15d42eae04a 2024-12-12T22:35:38,261 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4fef373f767c4a9890e7c322af369fae 2024-12-12T22:35:38,266 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/3d65b371282c4246b44467c268f675c4 2024-12-12T22:35:38,273 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/fc9d638c53ad45c2b9dab57ec52bf656 2024-12-12T22:35:38,273 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b10c79552e30464f90cbb0c65e870f24 2024-12-12T22:35:38,274 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/03a07e2d4ade4e5198b1ef795cb64798 2024-12-12T22:35:38,271 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/7929e70cc9dd48d28eb1f923696ded97 2024-12-12T22:35:38,277 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/2c841c05ecb44fa1a09dc3a7ed6c4d41 2024-12-12T22:35:38,278 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e3b47ace7679453abe6de4f98a896afb 2024-12-12T22:35:38,278 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/be97bd83afdd418a9c3c186e1950b084 2024-12-12T22:35:38,279 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c7ca9652853348ab9955b5ce4f555a29 2024-12-12T22:35:38,283 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e50d5ab8c065424f84d64bfd3985bd03 2024-12-12T22:35:38,287 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/c60b137474e949039afbb03d4ca50254 2024-12-12T22:35:38,288 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d051beb01da740d69e34793b4bb23167 2024-12-12T22:35:38,288 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/57a97a0748174b4ebb6d9abe40b582bb 2024-12-12T22:35:38,289 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e491f4d2ef29440fa156f31f67f74fb4 2024-12-12T22:35:38,290 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/043047ce10184b6d960e8fbc3eee313b 2024-12-12T22:35:38,290 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/d034ecf3b1ff45b8b8791dcd51d10e7a 2024-12-12T22:35:38,291 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/4236f725bca24d5181ee7e40b324763c 2024-12-12T22:35:38,292 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/a96aeeae2ae74b2ab93c2aaa2ca89324 2024-12-12T22:35:38,292 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/b61ba86927ac411aa1a819c033e5f3a1 2024-12-12T22:35:38,304 DEBUG [StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/08b7669939974cc28802c5c22fe10427, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c0ceb99cce1243fe86cb68f1f60e6376, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2f5abc5d70bb4b41892d341f4077a52a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/1e876edbbcc54b9988273a44c1d9fce6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e04d2d1be8e8482dae5070797af42324, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f124ff601ccf4bc18dd5612d50ab4ac3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca] to archive 2024-12-12T22:35:38,311 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:35:38,323 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/08b7669939974cc28802c5c22fe10427 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/08b7669939974cc28802c5c22fe10427 2024-12-12T22:35:38,323 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2695466bb6bf4259b223fe62b9e273ad 2024-12-12T22:35:38,323 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2876b56e97514cb28f1eec9e97282b98 2024-12-12T22:35:38,323 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/3bdee010924745b9b7b7618679dbc053 2024-12-12T22:35:38,323 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c0ceb99cce1243fe86cb68f1f60e6376 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c0ceb99cce1243fe86cb68f1f60e6376 2024-12-12T22:35:38,324 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e1ea9ca576f941c3acfdd83515222ebe 2024-12-12T22:35:38,327 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2f5abc5d70bb4b41892d341f4077a52a to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2f5abc5d70bb4b41892d341f4077a52a 2024-12-12T22:35:38,328 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e44d39d5c35246c7afa89bb343f8f02b 2024-12-12T22:35:38,328 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/adadfd324a044179b83044b80e678356 2024-12-12T22:35:38,329 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/2560892471a242539a0c78229387b759 2024-12-12T22:35:38,329 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/744c80bf3de443e19c566a89a72f29b2 2024-12-12T22:35:38,332 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/39fb24d1b59d4f37a2733a4ad53719ed 2024-12-12T22:35:38,330 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/84027398ce6146af9ebdd6ceb4efea47 2024-12-12T22:35:38,337 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/73df4a9ca825404e9acb474bb4862297 2024-12-12T22:35:38,337 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/1e876edbbcc54b9988273a44c1d9fce6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/1e876edbbcc54b9988273a44c1d9fce6 2024-12-12T22:35:38,337 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/70bb004c773d472d864b4fb150efcc90 2024-12-12T22:35:38,337 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/30a9b0d70fcb47a6bdb2210a07e1f57d 2024-12-12T22:35:38,344 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/712c8dbf26fe4773b2d4713f30b28df4 2024-12-12T22:35:38,345 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e04d2d1be8e8482dae5070797af42324 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/e04d2d1be8e8482dae5070797af42324 2024-12-12T22:35:38,350 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f124ff601ccf4bc18dd5612d50ab4ac3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f124ff601ccf4bc18dd5612d50ab4ac3 2024-12-12T22:35:38,350 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/775b8b5090464d44af2c0c9c56e5b7ca 2024-12-12T22:35:38,350 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c8e89e048ec3461797853769129af4f5 2024-12-12T22:35:38,355 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/f73066a901ca4191b334ab6a7f26fba4 2024-12-12T22:35:38,364 DEBUG [StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/faf510f22ef0436f8d996c992d87c967, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2c6eb27d71c748e0b2b14744676d3edb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/a615823be8df4d9d80ae863068aff44f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/3b1039f8aff14f70a261206641e84c73, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/62a7d24294724a02b194b4b99da37737, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b344e96c5c54415dad09e02121c67bef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd] to archive 2024-12-12T22:35:38,375 DEBUG [StoreCloser-TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:35:38,398 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/faf510f22ef0436f8d996c992d87c967 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/faf510f22ef0436f8d996c992d87c967 2024-12-12T22:35:38,398 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d35dd6539b754d53baa395ad8ff75e65 2024-12-12T22:35:38,399 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2eacf46601a447f2b145df5dbd788768 2024-12-12T22:35:38,399 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfa68c959bb46f6b5382363b3a99446 2024-12-12T22:35:38,399 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/77af206244e349e1ab002c1a1e9022d1 2024-12-12T22:35:38,406 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/71f870bcea23401a904d21a30310b16a 2024-12-12T22:35:38,407 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b252cba5404d44cfbc4204f6b5ebc4dd 2024-12-12T22:35:38,408 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/ffd4eeae60fd40609971c6fcae47f75f 2024-12-12T22:35:38,408 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/f434003d072c422b81035413857e00dd 2024-12-12T22:35:38,408 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/a615823be8df4d9d80ae863068aff44f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/a615823be8df4d9d80ae863068aff44f 2024-12-12T22:35:38,412 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/42a0115ee15548beb659dce2142309a1 2024-12-12T22:35:38,413 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2c6eb27d71c748e0b2b14744676d3edb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2c6eb27d71c748e0b2b14744676d3edb 2024-12-12T22:35:38,413 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/5415e838471044bd905f99426185df9f 2024-12-12T22:35:38,414 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e2e55636b064e5c80fc08f12fa4a8d1 2024-12-12T22:35:38,414 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/9e8e7a5876994af68772b5d7f0bb6681 2024-12-12T22:35:38,414 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/3b1039f8aff14f70a261206641e84c73 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/3b1039f8aff14f70a261206641e84c73 2024-12-12T22:35:38,415 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/62a7d24294724a02b194b4b99da37737 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/62a7d24294724a02b194b4b99da37737 2024-12-12T22:35:38,415 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/491a9de2c35f4e1cb4d14526ad695e9a 2024-12-12T22:35:38,419 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b344e96c5c54415dad09e02121c67bef to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/b344e96c5c54415dad09e02121c67bef 2024-12-12T22:35:38,420 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/8656fe7bec054591ab602f53a8ad78c9 2024-12-12T22:35:38,421 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/d8345c2e915b44d8b468ca87af9e51bd 2024-12-12T22:35:38,421 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/e14a13e929e04ccf9373878b92b83578 2024-12-12T22:35:38,421 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/94f434226c8448518e5844dfc436c04f 2024-12-12T22:35:38,437 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits/383.seqid, newMaxSeqId=383, maxSeqId=4 2024-12-12T22:35:38,439 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513. 2024-12-12T22:35:38,439 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] regionserver.HRegion(1635): Region close journal for 8c758bf11da38bc1c9062a0f3e05f513: 2024-12-12T22:35:38,444 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=50}] handler.UnassignRegionHandler(170): Closed 8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:38,444 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=49 updating hbase:meta row=8c758bf11da38bc1c9062a0f3e05f513, regionState=CLOSED 2024-12-12T22:35:38,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-12T22:35:38,447 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseRegionProcedure 8c758bf11da38bc1c9062a0f3e05f513, server=1aef280cf0a8,36025,1734042873576 in 2.4300 sec 2024-12-12T22:35:38,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=49, resume processing ppid=48 2024-12-12T22:35:38,448 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, ppid=48, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8c758bf11da38bc1c9062a0f3e05f513, UNASSIGN in 2.4370 sec 2024-12-12T22:35:38,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-12T22:35:38,451 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.4410 sec 2024-12-12T22:35:38,453 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042938453"}]},"ts":"1734042938453"} 2024-12-12T22:35:38,456 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:35:38,510 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees 
to state=DISABLED 2024-12-12T22:35:38,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.5540 sec 2024-12-12T22:35:40,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-12T22:35:40,094 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-12T22:35:40,095 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:35:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,098 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=51, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T22:35:40,099 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=51, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,103 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,113 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits] 2024-12-12T22:35:40,128 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/0a04638bcadc4e989ad9cfb10e6f84e8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/0a04638bcadc4e989ad9cfb10e6f84e8 2024-12-12T22:35:40,128 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/38d40a15a7304a51983ae6cf2e5c76ad to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/38d40a15a7304a51983ae6cf2e5c76ad 2024-12-12T22:35:40,128 DEBUG 
[HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e045856fb8a542a2adcecbe2b2b9a846 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/A/e045856fb8a542a2adcecbe2b2b9a846 2024-12-12T22:35:40,135 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/13d058a7f05b47a0817698a642bb48f6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/13d058a7f05b47a0817698a642bb48f6 2024-12-12T22:35:40,135 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/25a510d9d5694d26bf54a63b245dec76 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/25a510d9d5694d26bf54a63b245dec76 2024-12-12T22:35:40,141 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c182186ac90f496c8ea9d256e7ed51d0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/B/c182186ac90f496c8ea9d256e7ed51d0 2024-12-12T22:35:40,149 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfbbd63b2cb4285bf8a84cab76ec229 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/2cfbbd63b2cb4285bf8a84cab76ec229 2024-12-12T22:35:40,149 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/bb844aa5156940b4923251cd179b0d30 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/bb844aa5156940b4923251cd179b0d30 2024-12-12T22:35:40,149 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/708cdd2e2b90480194f961b1c95da475 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/C/708cdd2e2b90480194f961b1c95da475 2024-12-12T22:35:40,155 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits/383.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513/recovered.edits/383.seqid 2024-12-12T22:35:40,156 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,156 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:35:40,157 DEBUG [PEWorker-4 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:35:40,158 DEBUG [PEWorker-4 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T22:35:40,176 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f011fa3524246aaa29243bae60b0b57_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124f011fa3524246aaa29243bae60b0b57_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,178 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f50622393140748c45291bd8230404_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121239f50622393140748c45291bd8230404_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,178 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212316e2a08c669422a8394b16f1d86604f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212316e2a08c669422a8394b16f1d86604f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,178 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121238236dd0289f4e8393c84b028e1fe7cd_8c758bf11da38bc1c9062a0f3e05f513 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121238236dd0289f4e8393c84b028e1fe7cd_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,178 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211be312bb9204139ba9b7778f73039d1_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121211be312bb9204139ba9b7778f73039d1_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,181 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121ce0a3cb709642d2844f6509fc6334ae_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412121ce0a3cb709642d2844f6509fc6334ae_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,183 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121262e07f14e10646afa526ea7f1b9c7284_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121262e07f14e10646afa526ea7f1b9c7284_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,184 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258da9a33bfca40eaa9cb3ea8db0b5da1_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258da9a33bfca40eaa9cb3ea8db0b5da1_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,184 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ad9ad4bade74876b88f3e3e44785735_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126ad9ad4bade74876b88f3e3e44785735_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,185 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b1a342661314e7e89e0fb0a30c3c61f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b1a342661314e7e89e0fb0a30c3c61f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,185 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a51477b7ea3472a822deb6df34667e6_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a51477b7ea3472a822deb6df34667e6_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,185 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127ab9bee0810a48108d03be8875dc2420_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127ab9bee0810a48108d03be8875dc2420_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,186 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d8870419f4d4617bcd6249afe3214ac_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127d8870419f4d4617bcd6249afe3214ac_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,186 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5785803d0e147189bfcb0eded2e9b2f_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212a5785803d0e147189bfcb0eded2e9b2f_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,187 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6ed3aa83aa749fab5f447e71889d3a4_8c758bf11da38bc1c9062a0f3e05f513 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b6ed3aa83aa749fab5f447e71889d3a4_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,188 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e280c7956c2749928efdbb271889b012_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e280c7956c2749928efdbb271889b012_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,188 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e11529ff06dd462a8a38557fc10ea607_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e11529ff06dd462a8a38557fc10ea607_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,188 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e692ff3fb6fd483985857060e5d75177_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212e692ff3fb6fd483985857060e5d75177_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,189 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212da9579ecc45a4daa881f8addde51ce37_8c758bf11da38bc1c9062a0f3e05f513 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212da9579ecc45a4daa881f8addde51ce37_8c758bf11da38bc1c9062a0f3e05f513 2024-12-12T22:35:40,189 DEBUG [PEWorker-4 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:35:40,192 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=51, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T22:35:40,210 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 
rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:35:40,215 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T22:35:40,220 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=51, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,221 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T22:35:40,227 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734042940221"}]},"ts":"9223372036854775807"} 2024-12-12T22:35:40,244 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:35:40,244 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8c758bf11da38bc1c9062a0f3e05f513, NAME => 'TestAcidGuarantees,,1734042908352.8c758bf11da38bc1c9062a0f3e05f513.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:35:40,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T22:35:40,248 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734042940244"}]},"ts":"9223372036854775807"} 2024-12-12T22:35:40,271 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:35:40,287 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=51, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 192 msec 2024-12-12T22:35:40,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-12T22:35:40,402 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-12T22:35:40,416 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=243 (was 244), OpenFileDescriptor=455 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1536 (was 1350) - SystemLoadAverage LEAK? -, ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=4483 (was 3540) - AvailableMemoryMB LEAK? - 2024-12-12T22:35:40,437 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=243, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=1536, ProcessCount=11, AvailableMemoryMB=4481 2024-12-12T22:35:40,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T22:35:40,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:35:40,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=52, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:35:40,446 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:35:40,446 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:40,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 52 2024-12-12T22:35:40,448 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:35:40,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=52 2024-12-12T22:35:40,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742036_1212 (size=963) 2024-12-12T22:35:40,482 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:35:40,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742037_1213 (size=53) 2024-12-12T22:35:40,508 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:35:40,508 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3e6ba90564d3642fab3e7bc05bfeebf6, disabling compactions & flushes 2024-12-12T22:35:40,508 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:40,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:40,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. after waiting 0 ms 2024-12-12T22:35:40,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:40,509 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:40,509 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:40,510 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:35:40,510 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734042940510"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042940510"}]},"ts":"1734042940510"} 2024-12-12T22:35:40,511 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-12T22:35:40,512 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:35:40,512 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042940512"}]},"ts":"1734042940512"} 2024-12-12T22:35:40,513 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:35:40,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=52 2024-12-12T22:35:40,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, ASSIGN}] 2024-12-12T22:35:40,622 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, ASSIGN 2024-12-12T22:35:40,623 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:35:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=52 2024-12-12T22:35:40,773 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=53 updating hbase:meta row=3e6ba90564d3642fab3e7bc05bfeebf6, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:40,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; OpenRegionProcedure 3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:35:40,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:40,945 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:40,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(7285): Opening region: {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:35:40,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:40,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:35:40,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(7327): checking encryption for 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:40,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(7330): checking classloading for 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:40,967 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:40,985 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:40,986 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e6ba90564d3642fab3e7bc05bfeebf6 columnFamilyName A 2024-12-12T22:35:40,986 DEBUG [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:40,990 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(327): Store=3e6ba90564d3642fab3e7bc05bfeebf6/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:40,990 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:41,004 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:41,005 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e6ba90564d3642fab3e7bc05bfeebf6 columnFamilyName B 2024-12-12T22:35:41,005 DEBUG [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:41,022 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(327): Store=3e6ba90564d3642fab3e7bc05bfeebf6/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:41,022 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:41,026 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:35:41,026 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e6ba90564d3642fab3e7bc05bfeebf6 columnFamilyName C 2024-12-12T22:35:41,026 DEBUG [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:35:41,027 INFO [StoreOpener-3e6ba90564d3642fab3e7bc05bfeebf6-1 {}] regionserver.HStore(327): Store=3e6ba90564d3642fab3e7bc05bfeebf6/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:35:41,027 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:41,028 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:41,032 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=52 2024-12-12T22:35:41,065 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:35:41,087 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(1085): writing seq id for 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:41,112 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:35:41,114 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(1102): Opened 3e6ba90564d3642fab3e7bc05bfeebf6; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71920813, jitterRate=0.07170362770557404}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:35:41,115 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegion(1001): Region open journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:41,117 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., pid=54, masterSystemTime=1734042940930 2024-12-12T22:35:41,121 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:41,121 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=54}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:41,126 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=53 updating hbase:meta row=3e6ba90564d3642fab3e7bc05bfeebf6, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:41,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-12T22:35:41,152 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; OpenRegionProcedure 3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 in 364 msec 2024-12-12T22:35:41,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-12T22:35:41,166 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, ASSIGN in 532 msec 2024-12-12T22:35:41,179 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:35:41,179 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042941179"}]},"ts":"1734042941179"} 2024-12-12T22:35:41,183 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:35:41,270 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=52, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:35:41,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 829 msec 2024-12-12T22:35:41,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=52 2024-12-12T22:35:41,556 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 52 completed 2024-12-12T22:35:41,557 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-12-12T22:35:41,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,581 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:41,588 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:41,589 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:35:41,591 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56206, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:35:41,593 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-12T22:35:41,639 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,641 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-12-12T22:35:41,701 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,703 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-12-12T22:35:41,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,753 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-12-12T22:35:41,798 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,800 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-12-12T22:35:41,837 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,839 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-12T22:35:41,892 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,894 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-12-12T22:35:41,935 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,936 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-12-12T22:35:41,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:41,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x598cfed4 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@521aad6f 2024-12-12T22:35:42,032 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c86f707, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:42,033 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ad882f to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f5b2180 2024-12-12T22:35:42,066 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34becda3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:35:42,095 DEBUG [hconnection-0x6964e509-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,097 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:42,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-12T22:35:42,103 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:42,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:42,104 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:42,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:42,117 DEBUG [hconnection-0x4e33b366-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,118 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43256, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,119 DEBUG [hconnection-0xae735aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,121 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:42,145 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:42,145 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:42,148 DEBUG [hconnection-0x6c8826ae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,149 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,150 DEBUG [hconnection-0x39aff2f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,151 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,158 
DEBUG [hconnection-0x343a40b7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,160 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43300, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,169 DEBUG [hconnection-0x39e6b518-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,170 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,199 DEBUG [hconnection-0x60763863-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,200 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:42,212 DEBUG [hconnection-0x6c16c754-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,213 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43332, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043002210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043002210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043002211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043002215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,229 DEBUG [hconnection-0x6757d1bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:35:42,232 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43338, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:35:42,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043002234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/2a0a3c9297264b1d83db48043603d9bb is 50, key is test_row_0/A:col10/1734042942140/Put/seqid=0 2024-12-12T22:35:42,257 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,259 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:42,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:42,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742038_1214 (size=12001) 2024-12-12T22:35:42,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043002320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043002320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,326 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043002320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043002323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043002342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:42,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:42,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:42,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043002529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043002530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043002530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043002531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043002547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:42,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:42,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:42,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:42,721 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:42,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/2a0a3c9297264b1d83db48043603d9bb 2024-12-12T22:35:42,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:42,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:42,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/947e0975aaa948468ffb7c68ef7f756c is 50, key is test_row_0/B:col10/1734042942140/Put/seqid=0 2024-12-12T22:35:42,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043002834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043002834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043002834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742039_1215 (size=12001) 2024-12-12T22:35:42,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043002834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043002859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,880 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:42,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:42,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:42,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:42,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:42,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,035 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,035 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:43,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/947e0975aaa948468ffb7c68ef7f756c 2024-12-12T22:35:43,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 is 50, key is test_row_0/C:col10/1734042942140/Put/seqid=0 2024-12-12T22:35:43,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742040_1216 (size=12001) 2024-12-12T22:35:43,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:43,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043003344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,355 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:43,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043003354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:43,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043003363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043003363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:43,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043003372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,514 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:43,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,515 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,672 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
as already flushing 2024-12-12T22:35:43,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:43,673 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:43,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 2024-12-12T22:35:43,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/2a0a3c9297264b1d83db48043603d9bb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb 2024-12-12T22:35:43,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:35:43,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/947e0975aaa948468ffb7c68ef7f756c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c 2024-12-12T22:35:43,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:35:43,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 2024-12-12T22:35:43,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8, entries=150, sequenceid=12, filesize=11.7 K 2024-12-12T22:35:43,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1638ms, sequenceid=12, compaction requested=false 2024-12-12T22:35:43,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:43,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:43,839 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-12T22:35:43,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:43,839 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:35:43,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:43,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:43,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/69ca4d175cd141ec97194fb94398f5b3 is 50, key is test_row_0/A:col10/1734042942208/Put/seqid=0 2024-12-12T22:35:43,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742041_1217 (size=12001) 2024-12-12T22:35:43,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:35:44,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:44,286 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/69ca4d175cd141ec97194fb94398f5b3 2024-12-12T22:35:44,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/82406327098945558f4416141f45cfa3 is 50, key is test_row_0/B:col10/1734042942208/Put/seqid=0 2024-12-12T22:35:44,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742042_1218 (size=12001) 2024-12-12T22:35:44,349 INFO 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/82406327098945558f4416141f45cfa3 2024-12-12T22:35:44,360 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:44,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:44,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6d1934fae0c4434a8afe13c82f45d7c5 is 50, key is test_row_0/C:col10/1734042942208/Put/seqid=0 2024-12-12T22:35:44,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742043_1219 (size=12001) 2024-12-12T22:35:44,399 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6d1934fae0c4434a8afe13c82f45d7c5 2024-12-12T22:35:44,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/69ca4d175cd141ec97194fb94398f5b3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3 2024-12-12T22:35:44,415 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:35:44,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/82406327098945558f4416141f45cfa3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3 2024-12-12T22:35:44,429 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:35:44,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6d1934fae0c4434a8afe13c82f45d7c5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5 2024-12-12T22:35:44,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043004382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043004430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043004431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043004431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043004387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,464 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:35:44,466 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 627ms, sequenceid=38, compaction requested=false 2024-12-12T22:35:44,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:44,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:44,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-12T22:35:44,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-12T22:35:44,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-12T22:35:44,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3700 sec 2024-12-12T22:35:44,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 2.3770 sec 2024-12-12T22:35:44,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T22:35:44,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:44,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:44,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:44,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:44,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:44,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:44,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:44,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/f769c84061c54aadb4dfdf15e52e2f5f is 50, key is test_row_0/A:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:44,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742044_1220 (size=12001) 2024-12-12T22:35:44,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/f769c84061c54aadb4dfdf15e52e2f5f 2024-12-12T22:35:44,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/80aaf6a28aba419085733fcce8224e9d is 50, key is test_row_0/B:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:44,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742045_1221 
(size=12001) 2024-12-12T22:35:44,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043004618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043004633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043004633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043004636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043004738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043004739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043004739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043004746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043004943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043004942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043004950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:44,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043004951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/80aaf6a28aba419085733fcce8224e9d 2024-12-12T22:35:45,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/259756512b4542c2847af05d99330ffb is 50, key is test_row_0/C:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:45,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742046_1222 (size=12001) 2024-12-12T22:35:45,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/259756512b4542c2847af05d99330ffb 2024-12-12T22:35:45,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/f769c84061c54aadb4dfdf15e52e2f5f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f 2024-12-12T22:35:45,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T22:35:45,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/80aaf6a28aba419085733fcce8224e9d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d 2024-12-12T22:35:45,117 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T22:35:45,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/259756512b4542c2847af05d99330ffb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb 2024-12-12T22:35:45,136 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb, entries=150, sequenceid=51, filesize=11.7 K 2024-12-12T22:35:45,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 600ms, sequenceid=51, compaction requested=true 2024-12-12T22:35:45,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:45,140 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:45,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:45,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:45,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:45,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:45,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:45,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:45,143 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:45,151 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:45,151 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 
3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:45,151 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:45,151 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:45,151 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:45,151 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:45,151 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.2 K 2024-12-12T22:35:45,152 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.2 K 2024-12-12T22:35:45,154 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a0a3c9297264b1d83db48043603d9bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042942137 2024-12-12T22:35:45,154 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 947e0975aaa948468ffb7c68ef7f756c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042942137 2024-12-12T22:35:45,158 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69ca4d175cd141ec97194fb94398f5b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734042942208 2024-12-12T22:35:45,158 DEBUG 
[RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 82406327098945558f4416141f45cfa3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734042942208 2024-12-12T22:35:45,158 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f769c84061c54aadb4dfdf15e52e2f5f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 2024-12-12T22:35:45,163 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 80aaf6a28aba419085733fcce8224e9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 2024-12-12T22:35:45,219 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#177 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:45,219 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/d585fc07f3844346a942471def9ea155 is 50, key is test_row_0/B:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:45,225 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#178 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:45,226 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/d80c51dcd75f40cc9d94ca7bb9de21c7 is 50, key is test_row_0/A:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:45,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:35:45,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:45,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:45,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to 
blk_1073742047_1223 (size=12104) 2024-12-12T22:35:45,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742048_1224 (size=12104) 2024-12-12T22:35:45,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043005272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043005273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,290 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/88f7277df0074efb80948396f47df68c is 50, key is test_row_0/A:col10/1734042944620/Put/seqid=0 2024-12-12T22:35:45,294 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/d585fc07f3844346a942471def9ea155 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d585fc07f3844346a942471def9ea155 2024-12-12T22:35:45,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043005277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043005279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,308 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/d80c51dcd75f40cc9d94ca7bb9de21c7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/d80c51dcd75f40cc9d94ca7bb9de21c7 2024-12-12T22:35:45,320 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into d585fc07f3844346a942471def9ea155(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:45,321 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:45,321 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042945142; duration=0sec 2024-12-12T22:35:45,321 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:45,321 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:45,321 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:45,326 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:45,326 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:45,326 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:45,326 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.2 K 2024-12-12T22:35:45,328 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting dacfb4bdc3a8427aa1a5d0bd213e72b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1734042942137 2024-12-12T22:35:45,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742049_1225 (size=12001) 2024-12-12T22:35:45,336 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d1934fae0c4434a8afe13c82f45d7c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734042942208 2024-12-12T22:35:45,337 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), 
to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/88f7277df0074efb80948396f47df68c 2024-12-12T22:35:45,338 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 259756512b4542c2847af05d99330ffb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 2024-12-12T22:35:45,339 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into d80c51dcd75f40cc9d94ca7bb9de21c7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:45,339 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:45,339 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042945139; duration=0sec 2024-12-12T22:35:45,339 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:45,339 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:45,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/2111dc7877eb4a0ba2b1f9096ca7c16d is 50, key is test_row_0/B:col10/1734042944620/Put/seqid=0 2024-12-12T22:35:45,386 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#181 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:45,387 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/de6c6a87f1e74eedb6acd0a6857ff051 is 50, key is test_row_0/C:col10/1734042944535/Put/seqid=0 2024-12-12T22:35:45,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043005391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,394 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043005392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043005407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043005407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742050_1226 (size=12001) 2024-12-12T22:35:45,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/2111dc7877eb4a0ba2b1f9096ca7c16d 2024-12-12T22:35:45,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742051_1227 (size=12104) 2024-12-12T22:35:45,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b73527c7f62747b4af1fe0ad65754cea is 50, key is test_row_0/C:col10/1734042944620/Put/seqid=0 2024-12-12T22:35:45,475 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/de6c6a87f1e74eedb6acd0a6857ff051 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/de6c6a87f1e74eedb6acd0a6857ff051 2024-12-12T22:35:45,494 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into de6c6a87f1e74eedb6acd0a6857ff051(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:45,494 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:45,494 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042945143; duration=0sec 2024-12-12T22:35:45,494 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:45,494 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:45,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742052_1228 (size=12001) 2024-12-12T22:35:45,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043005596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043005599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043005611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043005619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043005901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,910 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b73527c7f62747b4af1fe0ad65754cea 2024-12-12T22:35:45,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043005910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043005915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:45,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043005924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:45,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/88f7277df0074efb80948396f47df68c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c 2024-12-12T22:35:45,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T22:35:45,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/2111dc7877eb4a0ba2b1f9096ca7c16d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d 2024-12-12T22:35:45,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T22:35:45,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b73527c7f62747b4af1fe0ad65754cea as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea 2024-12-12T22:35:45,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea, entries=150, sequenceid=76, filesize=11.7 K 2024-12-12T22:35:45,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 
KB/54960 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 713ms, sequenceid=76, compaction requested=false 2024-12-12T22:35:45,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:46,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-12T22:35:46,211 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-12T22:35:46,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:46,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-12T22:35:46,226 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:46,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T22:35:46,228 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:46,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:46,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T22:35:46,389 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,390 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-12T22:35:46,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:46,391 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:46,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:46,402 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/e980fa22fa7744ef9e0b8a1fa7ef2030 is 50, key is test_row_0/A:col10/1734042945277/Put/seqid=0 2024-12-12T22:35:46,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
as already flushing 2024-12-12T22:35:46,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:46,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742053_1229 (size=12001) 2024-12-12T22:35:46,445 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/e980fa22fa7744ef9e0b8a1fa7ef2030 2024-12-12T22:35:46,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/650caa0f18534b1fb74cfcab9f3a3e1a is 50, key is test_row_0/B:col10/1734042945277/Put/seqid=0 2024-12-12T22:35:46,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742054_1230 (size=12001) 2024-12-12T22:35:46,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043006475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043006477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043006475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043006480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043006482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,491 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/650caa0f18534b1fb74cfcab9f3a3e1a 2024-12-12T22:35:46,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6ec21141c8e5462e87693d2ce37c2eaa is 50, key is test_row_0/C:col10/1734042945277/Put/seqid=0 2024-12-12T22:35:46,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742055_1231 (size=12001) 2024-12-12T22:35:46,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T22:35:46,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043006584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,585 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043006584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043006587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043006587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043006589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043006787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043006788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043006790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043006792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043006792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:46,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T22:35:46,929 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6ec21141c8e5462e87693d2ce37c2eaa 2024-12-12T22:35:46,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/e980fa22fa7744ef9e0b8a1fa7ef2030 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030 2024-12-12T22:35:46,960 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030, entries=150, sequenceid=90, filesize=11.7 K 2024-12-12T22:35:46,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/650caa0f18534b1fb74cfcab9f3a3e1a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a 2024-12-12T22:35:46,978 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a, entries=150, sequenceid=90, filesize=11.7 K 2024-12-12T22:35:46,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6ec21141c8e5462e87693d2ce37c2eaa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa 2024-12-12T22:35:46,990 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa, entries=150, sequenceid=90, filesize=11.7 K 2024-12-12T22:35:46,991 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 601ms, sequenceid=90, compaction requested=true 2024-12-12T22:35:46,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:46,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:46,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-12T22:35:46,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-12T22:35:46,994 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-12T22:35:46,994 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 764 msec 2024-12-12T22:35:46,996 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 778 msec 2024-12-12T22:35:47,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:47,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:35:47,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:47,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:47,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): 
FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:47,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043007101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043007102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043007102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043007103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043007104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,122 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/61999ed2480441878e75f33974ba3653 is 50, key is test_row_0/A:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742056_1232 (size=14341) 2024-12-12T22:35:47,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/61999ed2480441878e75f33974ba3653 2024-12-12T22:35:47,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/47c869fd45b54587a6aa31696feb3e82 is 50, key is test_row_0/B:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742057_1233 (size=12001) 2024-12-12T22:35:47,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043007211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043007211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043007212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043007212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043007214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-12T22:35:47,332 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-12T22:35:47,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:47,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-12T22:35:47,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:47,335 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:47,336 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:47,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:47,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043007418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043007418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043007418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043007419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043007424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:47,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T22:35:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:47,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:47,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:47,619 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/47c869fd45b54587a6aa31696feb3e82 2024-12-12T22:35:47,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/51c9a168221c4c738df48d6422ceaef7 is 50, key is test_row_0/C:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:47,647 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T22:35:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:47,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:47,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:47,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742058_1234 (size=12001) 2024-12-12T22:35:47,689 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/51c9a168221c4c738df48d6422ceaef7 2024-12-12T22:35:47,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/61999ed2480441878e75f33974ba3653 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653 2024-12-12T22:35:47,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653, entries=200, sequenceid=118, filesize=14.0 K 2024-12-12T22:35:47,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/47c869fd45b54587a6aa31696feb3e82 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82 2024-12-12T22:35:47,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043007722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043007726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043007726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,731 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82, entries=150, sequenceid=118, filesize=11.7 K 2024-12-12T22:35:47,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/51c9a168221c4c738df48d6422ceaef7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7 2024-12-12T22:35:47,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043007726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:47,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043007737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,740 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7, entries=150, sequenceid=118, filesize=11.7 K 2024-12-12T22:35:47,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 646ms, sequenceid=118, compaction requested=true 2024-12-12T22:35:47,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:47,743 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:47,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:47,743 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:47,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:47,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:47,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:47,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:47,746 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 
files of size 50447 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:47,746 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:47,746 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,746 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:47,746 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:47,746 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,746 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/d80c51dcd75f40cc9d94ca7bb9de21c7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=49.3 K 2024-12-12T22:35:47,746 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d585fc07f3844346a942471def9ea155, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=47.0 K 2024-12-12T22:35:47,747 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d585fc07f3844346a942471def9ea155, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 
2024-12-12T22:35:47,748 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d80c51dcd75f40cc9d94ca7bb9de21c7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 2024-12-12T22:35:47,749 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2111dc7877eb4a0ba2b1f9096ca7c16d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734042944615 2024-12-12T22:35:47,749 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88f7277df0074efb80948396f47df68c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734042944615 2024-12-12T22:35:47,749 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 650caa0f18534b1fb74cfcab9f3a3e1a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734042945271 2024-12-12T22:35:47,750 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 47c869fd45b54587a6aa31696feb3e82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:47,751 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e980fa22fa7744ef9e0b8a1fa7ef2030, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734042945271 2024-12-12T22:35:47,752 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61999ed2480441878e75f33974ba3653, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:47,770 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#189 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:47,771 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/49a15775173d4695a5f748597e808081 is 50, key is test_row_0/B:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,777 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#190 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:47,779 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/eb6acbe6a78e47619735d83606635d22 is 50, key is test_row_0/A:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742059_1235 (size=12241) 2024-12-12T22:35:47,803 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:47,807 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/49a15775173d4695a5f748597e808081 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/49a15775173d4695a5f748597e808081 2024-12-12T22:35:47,811 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-12T22:35:47,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742060_1236 (size=12241) 2024-12-12T22:35:47,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:47,815 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:47,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:47,819 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into 49a15775173d4695a5f748597e808081(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:47,819 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:47,819 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=12, startTime=1734042947743; duration=0sec 2024-12-12T22:35:47,819 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:47,819 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:47,819 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:47,822 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:47,822 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:47,822 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:47,822 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/de6c6a87f1e74eedb6acd0a6857ff051, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=47.0 K 2024-12-12T22:35:47,823 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting de6c6a87f1e74eedb6acd0a6857ff051, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1734042944388 2024-12-12T22:35:47,827 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b73527c7f62747b4af1fe0ad65754cea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1734042944615 2024-12-12T22:35:47,828 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ec21141c8e5462e87693d2ce37c2eaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=90, earliestPutTs=1734042945271 2024-12-12T22:35:47,828 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 51c9a168221c4c738df48d6422ceaef7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:47,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/40fcc3e1c3174e17996d3f2b219c5115 is 50, key is test_row_0/A:col10/1734042947102/Put/seqid=0 2024-12-12T22:35:47,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742061_1237 (size=12001) 2024-12-12T22:35:47,882 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#192 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:47,883 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/5640cfb9dda34dbfb00637f385a8f4d3 is 50, key is test_row_0/C:col10/1734042947095/Put/seqid=0 2024-12-12T22:35:47,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:48,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742062_1238 (size=12241) 2024-12-12T22:35:48,069 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/5640cfb9dda34dbfb00637f385a8f4d3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/5640cfb9dda34dbfb00637f385a8f4d3 2024-12-12T22:35:48,093 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 5640cfb9dda34dbfb00637f385a8f4d3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:48,093 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:48,093 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=12, startTime=1734042947744; duration=0sec 2024-12-12T22:35:48,094 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:48,094 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:48,235 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/eb6acbe6a78e47619735d83606635d22 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eb6acbe6a78e47619735d83606635d22 2024-12-12T22:35:48,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:48,237 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/40fcc3e1c3174e17996d3f2b219c5115 2024-12-12T22:35:48,249 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into eb6acbe6a78e47619735d83606635d22(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:48,250 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:48,250 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=12, startTime=1734042947742; duration=0sec 2024-12-12T22:35:48,250 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:48,250 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:48,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/ba1c2d853cbb413e82fc9a7bae8e6af4 is 50, key is test_row_0/B:col10/1734042947102/Put/seqid=0 2024-12-12T22:35:48,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742063_1239 (size=12001) 2024-12-12T22:35:48,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043008277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043008282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043008282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043008285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043008287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043008390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043008390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043008390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043008392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043008392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:48,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043008594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043008596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043008596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043008596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043008597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,679 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/ba1c2d853cbb413e82fc9a7bae8e6af4 2024-12-12T22:35:48,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4ac9a4171bb24f63b73c2daf751a1494 is 50, key is test_row_0/C:col10/1734042947102/Put/seqid=0 2024-12-12T22:35:48,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742064_1240 (size=12001) 2024-12-12T22:35:48,701 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4ac9a4171bb24f63b73c2daf751a1494 2024-12-12T22:35:48,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/40fcc3e1c3174e17996d3f2b219c5115 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115 2024-12-12T22:35:48,718 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115, entries=150, sequenceid=126, filesize=11.7 K 2024-12-12T22:35:48,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/ba1c2d853cbb413e82fc9a7bae8e6af4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4 2024-12-12T22:35:48,725 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4, entries=150, sequenceid=126, filesize=11.7 K 2024-12-12T22:35:48,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4ac9a4171bb24f63b73c2daf751a1494 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494 2024-12-12T22:35:48,734 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494, entries=150, sequenceid=126, filesize=11.7 K 2024-12-12T22:35:48,735 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 920ms, sequenceid=126, compaction requested=false 2024-12-12T22:35:48,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:48,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
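[Editor's note] The repeated RegionTooBusyException entries above show writes being rejected while the region's memstore is over its 512.0 K blocking limit; they subside once the flush just logged commits its A/B/C store files. As a hedged illustration only (not code from this test), the sketch below uses the two stock HBase properties that normally determine that blocking limit (flush size times block multiplier) and a simple client-side back-off around Table.put. The table and row names come from this log; the property values, class name, and retry loop are assumptions for the example.

// Illustrative sketch, not TestAcidGuarantees code: back off on a write
// rejected while the region's memstore exceeds its blocking limit.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreBackoffExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit is typically flush.size * block.multiplier; 128 K * 4 = 512 K
    // is an assumed combination matching the 512.0 K limit seen in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);   // may fail while the memstore is over its blocking limit
          break;
        } catch (IOException busyOrOther) {
          // In the scenario logged above the expected cause is RegionTooBusyException,
          // possibly wrapped by the client's own retry layer; wait for the flush and retry.
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}

In practice the HBase client already retries such calls internally, so the explicit loop is only meant to make the blocked-until-flush behaviour visible.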
2024-12-12T22:35:48,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-12T22:35:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-12T22:35:48,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-12T22:35:48,740 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4010 sec 2024-12-12T22:35:48,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.4060 sec 2024-12-12T22:35:48,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-12T22:35:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:48,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:48,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:48,925 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043008904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043008905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043008906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043008926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/da1249cb58b241d881a1d56bf153ba11 is 50, key is test_row_0/A:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:48,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:48,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043008926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:48,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742065_1241 (size=14541) 2024-12-12T22:35:48,970 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/da1249cb58b241d881a1d56bf153ba11 2024-12-12T22:35:48,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/fe4205f2d7ef42ad9651ac6962f229db is 50, key is test_row_0/B:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:49,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742066_1242 (size=12151) 2024-12-12T22:35:49,025 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/fe4205f2d7ef42ad9651ac6962f229db 2024-12-12T22:35:49,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043009028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043009029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043009029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043009029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043009030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/0dbc0bbac7484b7896ba6ded5476b66f is 50, key is test_row_0/C:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:49,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742067_1243 (size=12151) 2024-12-12T22:35:49,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/0dbc0bbac7484b7896ba6ded5476b66f 2024-12-12T22:35:49,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/da1249cb58b241d881a1d56bf153ba11 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11 2024-12-12T22:35:49,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11, entries=200, sequenceid=159, filesize=14.2 K 2024-12-12T22:35:49,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/fe4205f2d7ef42ad9651ac6962f229db as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db 2024-12-12T22:35:49,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db, entries=150, sequenceid=159, filesize=11.9 K 2024-12-12T22:35:49,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/0dbc0bbac7484b7896ba6ded5476b66f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f 2024-12-12T22:35:49,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f, entries=150, sequenceid=159, filesize=11.9 K 2024-12-12T22:35:49,120 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=26.84 KB/27480 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 220ms, sequenceid=159, compaction requested=true 2024-12-12T22:35:49,120 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:49,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:49,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:49,120 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:49,120 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:49,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:49,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:49,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:49,121 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:49,123 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38783 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:49,123 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:49,123 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:49,123 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:49,123 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,123 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,123 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/49a15775173d4695a5f748597e808081, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.5 K 2024-12-12T22:35:49,123 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eb6acbe6a78e47619735d83606635d22, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=37.9 K 2024-12-12T22:35:49,124 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 49a15775173d4695a5f748597e808081, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:49,125 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
eb6acbe6a78e47619735d83606635d22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:49,125 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ba1c2d853cbb413e82fc9a7bae8e6af4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1734042947098 2024-12-12T22:35:49,126 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40fcc3e1c3174e17996d3f2b219c5115, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1734042947098 2024-12-12T22:35:49,127 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting fe4205f2d7ef42ad9651ac6962f229db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:49,130 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting da1249cb58b241d881a1d56bf153ba11, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:49,149 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:49,149 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/91cd6c252b174dae9309368e55b56ecd is 50, key is test_row_0/B:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:49,158 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#199 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:49,159 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/72ba09189fa14a23b65fcfe491072a00 is 50, key is test_row_0/A:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:49,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742068_1244 (size=12493) 2024-12-12T22:35:49,192 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/91cd6c252b174dae9309368e55b56ecd as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/91cd6c252b174dae9309368e55b56ecd 2024-12-12T22:35:49,201 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into 91cd6c252b174dae9309368e55b56ecd(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:49,201 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:49,201 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042949120; duration=0sec 2024-12-12T22:35:49,201 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:49,201 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:49,201 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:49,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742069_1245 (size=12493) 2024-12-12T22:35:49,203 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:49,203 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:49,203 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:49,203 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/5640cfb9dda34dbfb00637f385a8f4d3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.5 K 2024-12-12T22:35:49,205 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5640cfb9dda34dbfb00637f385a8f4d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734042946473 2024-12-12T22:35:49,205 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ac9a4171bb24f63b73c2daf751a1494, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1734042947098 2024-12-12T22:35:49,206 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0dbc0bbac7484b7896ba6ded5476b66f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:49,218 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/72ba09189fa14a23b65fcfe491072a00 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/72ba09189fa14a23b65fcfe491072a00 2024-12-12T22:35:49,225 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#200 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:49,226 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/0493466700d940dd925002d6df15d66e is 50, key is test_row_0/C:col10/1734042948262/Put/seqid=0 2024-12-12T22:35:49,237 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into 72ba09189fa14a23b65fcfe491072a00(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
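[Editor's note] The minor compactions above are requested once each store has accumulated three flushed HFiles, and the throttle lines report a 50.00 MB/second throughput ceiling. The sketch below, illustrative only, points at the standard HBase properties that control those two behaviours; the values shown are examples and are assumed, not read from this test's configuration, and the mapping of the 50 MB/s figure to the higher-bound property is likewise an assumption.

// Illustrative sketch: stock HBase properties behind the compaction behaviour logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compactionThreshold", 3);   // HFiles per store before a minor compaction is requested
    conf.setInt("hbase.hstore.compaction.max", 10);        // upper bound on files merged in one minor compaction
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound",
        50L * 1024 * 1024);                                 // assumed source of the 50 MB/s cap seen in the throttle lines
    System.out.println("minor compaction after "
        + conf.getInt("hbase.hstore.compactionThreshold", 3) + " store files");
  }
}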
2024-12-12T22:35:49,237 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:49,237 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042949120; duration=0sec 2024-12-12T22:35:49,237 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:49,237 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:49,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742070_1246 (size=12493) 2024-12-12T22:35:49,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:49,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:49,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,251 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/0493466700d940dd925002d6df15d66e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0493466700d940dd925002d6df15d66e 2024-12-12T22:35:49,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/eca1cd05b6c54f20b5fb3e836fb6723e is 50, key is test_row_0/A:col10/1734042949245/Put/seqid=0 2024-12-12T22:35:49,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742071_1247 (size=12151) 2024-12-12T22:35:49,268 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 
into 0493466700d940dd925002d6df15d66e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:49,268 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:49,268 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042949121; duration=0sec 2024-12-12T22:35:49,268 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:49,268 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:49,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043009292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043009294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043009294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043009295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043009294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043009397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043009398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043009400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043009400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043009403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-12T22:35:49,444 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-12T22:35:49,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:49,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-12-12T22:35:49,453 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:49,454 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:49,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:49,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:49,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043009600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043009600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043009604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043009604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,608 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043009607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:49,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/eca1cd05b6c54f20b5fb3e836fb6723e 2024-12-12T22:35:49,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c6e1c19958784970b3d6c31cb5a3c21e is 50, key is test_row_0/B:col10/1734042949245/Put/seqid=0 2024-12-12T22:35:49,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742072_1248 (size=12151) 2024-12-12T22:35:49,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c6e1c19958784970b3d6c31cb5a3c21e 2024-12-12T22:35:49,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/1e3dbadf806e421ab287ce69fc9fe99f is 50, key is test_row_0/C:col10/1734042949245/Put/seqid=0 2024-12-12T22:35:49,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742073_1249 (size=12151) 2024-12-12T22:35:49,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/1e3dbadf806e421ab287ce69fc9fe99f 2024-12-12T22:35:49,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/eca1cd05b6c54f20b5fb3e836fb6723e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e 2024-12-12T22:35:49,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e, entries=150, sequenceid=176, filesize=11.9 K 2024-12-12T22:35:49,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c6e1c19958784970b3d6c31cb5a3c21e as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e 2024-12-12T22:35:49,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:49,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:49,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:49,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e, entries=150, sequenceid=176, filesize=11.9 K 2024-12-12T22:35:49,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:49,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/1e3dbadf806e421ab287ce69fc9fe99f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f 2024-12-12T22:35:49,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f, entries=150, sequenceid=176, filesize=11.9 K 2024-12-12T22:35:49,793 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 548ms, sequenceid=176, compaction requested=false 2024-12-12T22:35:49,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:49,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:49,905 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:49,906 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:49,911 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/b01f1dca8340497f9b164a94e465f15a is 50, key is test_row_0/A:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:49,918 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:49,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:49,919 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043009916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:49,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043009918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043009921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043009921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:49,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043009921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:49,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742074_1250 (size=14541) 2024-12-12T22:35:50,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043010020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043010022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,025 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043010024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043010024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,026 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043010025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,072 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:50,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:50,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043010224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043010224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,227 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:50,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043010227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:50,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043010229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043010229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/b01f1dca8340497f9b164a94e465f15a 2024-12-12T22:35:50,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/b10587d196ba4071a0685976a5a51033 is 50, key is test_row_0/B:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:50,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742075_1251 (size=12151) 2024-12-12T22:35:50,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,527 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043010526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043010528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043010528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,532 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:50,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043010531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:50,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:50,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043010533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:50,685 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,685 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,686 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
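From the client's point of view, these rejected Mutate calls are normally retried by the HBase client itself (hbase.client.retries.number, hbase.client.pause) before the application ever sees an error; RegionTooBusyException extends IOException and usually surfaces wrapped in a retries-exhausted exception. The sketch below is a hand-rolled writer with explicit backoff, for illustration only; the table name, row, and column mirror the test data, but the retry policy is an assumption rather than the test's actual code.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);   // accepted once the memstore drops below the limit
                        break;
                    } catch (IOException busyOrRetriesExhausted) {
                        // A RegionTooBusyException (possibly wrapped) means the region is still
                        // over its memstore limit; wait for the flush and try again.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }

Exponential backoff is a reasonable default here because the condition clears as soon as the MemStoreFlusher flush recorded later in this section finishes.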
2024-12-12T22:35:50,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/b10587d196ba4071a0685976a5a51033 2024-12-12T22:35:50,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/9fddc63d62f0424fb38447564dbac592 is 50, key is test_row_0/C:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:50,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742076_1252 (size=12151) 2024-12-12T22:35:50,837 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,838 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,838 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
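The pid=62 entries form a master-driven flush procedure: the master keeps re-dispatching FlushRegionCallable to the region server, each attempt fails with IOException because the region is "already flushing", and a later attempt finally succeeds once the MemStoreFlusher-initiated flush is done (the flush at sequenceid=216 near the end of this section). A procedure like this is typically what an admin-requested flush turns into; assuming the test drives flushes through the Admin API, which this log does not show directly, the request looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to run a flush procedure (the pid=62-style procedure above)
                // covering every region of the table.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }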
2024-12-12T22:35:50,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,990 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:50,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:50,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:50,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:50,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:50,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:51,030 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043011029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043011032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,036 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:51,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043011035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,037 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:51,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043011037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:51,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043011038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,142 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:51,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:51,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:51,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:51,143 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:51,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:51,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:51,174 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/9fddc63d62f0424fb38447564dbac592 2024-12-12T22:35:51,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/b01f1dca8340497f9b164a94e465f15a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a 2024-12-12T22:35:51,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a, entries=200, sequenceid=200, filesize=14.2 K 2024-12-12T22:35:51,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/b10587d196ba4071a0685976a5a51033 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033 2024-12-12T22:35:51,190 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033, entries=150, 
sequenceid=200, filesize=11.9 K 2024-12-12T22:35:51,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/9fddc63d62f0424fb38447564dbac592 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592 2024-12-12T22:35:51,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592, entries=150, sequenceid=200, filesize=11.9 K 2024-12-12T22:35:51,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1291ms, sequenceid=200, compaction requested=true 2024-12-12T22:35:51,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:51,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:51,196 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:51,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:51,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:51,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:51,197 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:51,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:51,197 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:51,197 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
39185 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:51,198 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:51,198 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:51,198 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/91cd6c252b174dae9309368e55b56ecd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.9 K 2024-12-12T22:35:51,198 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/72ba09189fa14a23b65fcfe491072a00, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=38.3 K 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 91cd6c252b174dae9309368e55b56ecd, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72ba09189fa14a23b65fcfe491072a00, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c6e1c19958784970b3d6c31cb5a3c21e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734042949239 2024-12-12T22:35:51,198 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting eca1cd05b6c54f20b5fb3e836fb6723e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734042949239 2024-12-12T22:35:51,199 DEBUG 
[RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b10587d196ba4071a0685976a5a51033, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:51,199 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b01f1dca8340497f9b164a94e465f15a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:51,212 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:51,213 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/d1c7f0194ba54f9092984c838cbe2096 is 50, key is test_row_0/B:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:51,217 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#208 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:51,218 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/deaf1a61866140ddb3f0dc2a2ee08423 is 50, key is test_row_0/A:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:51,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742077_1253 (size=12595) 2024-12-12T22:35:51,234 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/d1c7f0194ba54f9092984c838cbe2096 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d1c7f0194ba54f9092984c838cbe2096 2024-12-12T22:35:51,240 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into d1c7f0194ba54f9092984c838cbe2096(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
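The compaction entries above reflect a handful of store-level settings: three eligible HFiles is the default minimum for selecting a minor compaction, "16 blocking" is the blocking store-file count, and the 50.00 MB/second ceiling reported by PressureAwareThroughputController matches the default lower throughput bound that applies when there is no compaction pressure. A sketch of those knobs follows; the values shown are the usual defaults, and whether this test overrides any of them is not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobsSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible files before a minor compaction is selected.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Writes are delayed once a store accumulates this many files ("16 blocking" above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            // Compaction throughput floor; with zero pressure the controller limits to this rate.
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            System.out.println("compaction.min = "
                + conf.getInt("hbase.hstore.compaction.min", 3)
                + ", blockingStoreFiles = "
                + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
        }
    }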
2024-12-12T22:35:51,240 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:51,240 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042951196; duration=0sec 2024-12-12T22:35:51,240 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:51,240 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:51,240 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:51,242 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:51,242 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:51,242 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:51,242 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0493466700d940dd925002d6df15d66e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=35.9 K 2024-12-12T22:35:51,242 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0493466700d940dd925002d6df15d66e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1734042948262 2024-12-12T22:35:51,243 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e3dbadf806e421ab287ce69fc9fe99f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1734042949239 2024-12-12T22:35:51,243 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fddc63d62f0424fb38447564dbac592, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:51,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 
is added to blk_1073742078_1254 (size=12595) 2024-12-12T22:35:51,257 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#209 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:51,258 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/39377278ec054fd4842dec67bd51b90e is 50, key is test_row_0/C:col10/1734042949293/Put/seqid=0 2024-12-12T22:35:51,261 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/deaf1a61866140ddb3f0dc2a2ee08423 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/deaf1a61866140ddb3f0dc2a2ee08423 2024-12-12T22:35:51,267 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into deaf1a61866140ddb3f0dc2a2ee08423(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:51,267 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:51,267 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042951196; duration=0sec 2024-12-12T22:35:51,267 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:51,267 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:51,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742079_1255 (size=12595) 2024-12-12T22:35:51,295 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:51,303 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-12T22:35:51,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
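The flush that finally runs below covers the region's three column families A, B and C, and each store goes through CompactingMemStore (the "Swapping pipeline suffix" lines), meaning in-memory compaction is enabled for the families. The actual schema setup lives in the test harness and is not shown in this log; the following is only a sketch of how a table of that shape could be declared, with the BASIC in-memory compaction policy assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            // CompactingMemStore, i.e. in-memory compaction, as seen in the flush log.
                            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                            .build());
                }
                admin.createTable(table.build());
            }
        }
    }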
2024-12-12T22:35:51,304 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:51,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:51,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/871419e138d74f6b9c578d18ff38be8f is 50, key is test_row_0/A:col10/1734042949918/Put/seqid=0 2024-12-12T22:35:51,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742080_1256 (size=12151) 2024-12-12T22:35:51,332 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/871419e138d74f6b9c578d18ff38be8f 2024-12-12T22:35:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/81ed8e74458c4301b7b1824f2bede6f1 is 50, key is test_row_0/B:col10/1734042949918/Put/seqid=0 2024-12-12T22:35:51,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742081_1257 (size=12151) 2024-12-12T22:35:51,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:51,674 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/39377278ec054fd4842dec67bd51b90e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/39377278ec054fd4842dec67bd51b90e 2024-12-12T22:35:51,679 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 39377278ec054fd4842dec67bd51b90e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:51,679 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:51,679 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042951197; duration=0sec 2024-12-12T22:35:51,679 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:51,679 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:51,766 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/81ed8e74458c4301b7b1824f2bede6f1 2024-12-12T22:35:51,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/278852ead030498382a1eb7ae562d5c0 is 50, key is test_row_0/C:col10/1734042949918/Put/seqid=0 2024-12-12T22:35:51,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742082_1258 (size=12151) 2024-12-12T22:35:51,779 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/278852ead030498382a1eb7ae562d5c0 2024-12-12T22:35:51,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/871419e138d74f6b9c578d18ff38be8f as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f 2024-12-12T22:35:51,807 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f, entries=150, sequenceid=216, filesize=11.9 K 2024-12-12T22:35:51,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/81ed8e74458c4301b7b1824f2bede6f1 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1 2024-12-12T22:35:51,813 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1, entries=150, sequenceid=216, filesize=11.9 K 2024-12-12T22:35:51,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/278852ead030498382a1eb7ae562d5c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0 2024-12-12T22:35:51,820 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0, entries=150, sequenceid=216, filesize=11.9 K 2024-12-12T22:35:51,822 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 518ms, sequenceid=216, compaction requested=false 2024-12-12T22:35:51,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:51,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
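The flush completing below writes one ~11.9 K HFile per column family (A, B and C), each with 150 entries at sequenceid=216, and the cell keys in the log have the shape test_row_0/<family>:col10. A minimal sketch of the corresponding client write, with the row key, families and qualifier taken from the log and the payload bytes assumed for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicRowWrite {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] value = Bytes.toBytes("dummy-value");  // assumed payload, not from the test
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // One Put spanning all three families is applied to the row atomically,
          // which is the guarantee the flushes and compactions in this log must preserve.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
          table.put(put);
        }
      }
    }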
2024-12-12T22:35:51,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-12T22:35:51,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-12T22:35:51,825 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-12T22:35:51,825 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3700 sec 2024-12-12T22:35:51,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 2.3780 sec 2024-12-12T22:35:52,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:52,050 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:35:52,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:52,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:52,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:52,051 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5750dc57bb144ddfb7180faec5523456 is 50, key is test_row_0/A:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043012069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043012073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043012075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043012075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043012102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742083_1259 (size=12151) 2024-12-12T22:35:52,113 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5750dc57bb144ddfb7180faec5523456 2024-12-12T22:35:52,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/7f21250bd5a84120b469c5f9febda6f4 is 50, key is test_row_0/B:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742084_1260 (size=12151) 2024-12-12T22:35:52,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043012175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,180 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043012178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043012180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043012180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043012205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043012381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043012382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043012382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,384 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043012383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043012407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/7f21250bd5a84120b469c5f9febda6f4 2024-12-12T22:35:52,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4c8e2997c174426aab5551f305f5b40f is 50, key is test_row_0/C:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742085_1261 (size=12151) 2024-12-12T22:35:52,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4c8e2997c174426aab5551f305f5b40f 2024-12-12T22:35:52,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5750dc57bb144ddfb7180faec5523456 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456 2024-12-12T22:35:52,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456, entries=150, sequenceid=230, filesize=11.9 K 2024-12-12T22:35:52,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/7f21250bd5a84120b469c5f9febda6f4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4 2024-12-12T22:35:52,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4, entries=150, sequenceid=230, filesize=11.9 K 2024-12-12T22:35:52,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/4c8e2997c174426aab5551f305f5b40f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f 2024-12-12T22:35:52,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f, entries=150, sequenceid=230, filesize=11.9 K 2024-12-12T22:35:52,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 612ms, sequenceid=230, compaction requested=true 2024-12-12T22:35:52,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:52,662 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:52,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:52,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:52,662 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:52,663 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:52,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:52,663 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:52,663 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:52,664 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:52,664 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:52,664 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:52,664 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:52,664 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/deaf1a61866140ddb3f0dc2a2ee08423, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.0 K 2024-12-12T22:35:52,664 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:52,664 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
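Each store holds three HFiles again at this point (the previously compacted file at seqNum=200 plus the flushes at sequenceid 216 and 230), so ExploringCompactionPolicy selects all three (totalSize 36.0 K) for a minor compaction, throttled by PressureAwareThroughputController at 50 MB/s. These compactions were system-requested after the flushes; a minimal sketch of the manual equivalent via the Admin API, with the table and family names taken from the log and everything else assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Queue a (minor) compaction of family A; the server-side policy still
          // decides which store files actually get merged.
          admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
          // Or rewrite all files in every store of the table:
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
      // The 50 MB/s cap reported by PressureAwareThroughputController is governed by
      // server-side settings such as hbase.hstore.compaction.throughput.lower.bound and
      // hbase.hstore.compaction.throughput.higher.bound (set in hbase-site.xml, not here).
    }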
2024-12-12T22:35:52,664 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d1c7f0194ba54f9092984c838cbe2096, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.0 K 2024-12-12T22:35:52,664 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting deaf1a61866140ddb3f0dc2a2ee08423, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:52,666 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 871419e138d74f6b9c578d18ff38be8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1734042949917 2024-12-12T22:35:52,666 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d1c7f0194ba54f9092984c838cbe2096, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:52,666 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5750dc57bb144ddfb7180faec5523456, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:52,666 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 81ed8e74458c4301b7b1824f2bede6f1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1734042949917 2024-12-12T22:35:52,667 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f21250bd5a84120b469c5f9febda6f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:52,681 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:52,682 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/8205cd8d516c4f6daf88b82b74766076 is 50, key is test_row_0/A:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:52,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:52,689 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#217 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:52,689 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/a542453e3b2644aa9154d6ff32391126 is 50, key is test_row_0/B:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742086_1262 (size=12697) 2024-12-12T22:35:52,704 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/8205cd8d516c4f6daf88b82b74766076 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8205cd8d516c4f6daf88b82b74766076 2024-12-12T22:35:52,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043012700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043012702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043012702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043012703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,710 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into 8205cd8d516c4f6daf88b82b74766076(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:52,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:52,710 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042952662; duration=0sec 2024-12-12T22:35:52,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:52,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:52,710 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:52,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043012710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742087_1263 (size=12697) 2024-12-12T22:35:52,713 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:52,713 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:52,713 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/6f0e49f4daa94892b028ad971d5f8949 is 50, key is test_row_0/A:col10/1734042952686/Put/seqid=0 2024-12-12T22:35:52,713 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
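Note on the compaction entries above: the ExploringCompactionPolicy line reports that all 3 store files (36897 bytes total) were selected after considering a single permutation "with 1 in ratio". The sketch below is only an illustration of that kind of ratio test, not the real ExploringCompactionPolicy; the file sizes and the 1.2 ratio are assumptions chosen to roughly match the logged totals.

import java.util.List;

/**
 * Illustrative sketch only: a simplified ratio-style selection in the spirit of the
 * "Exploring compaction" decision logged above. This is NOT the real
 * ExploringCompactionPolicy; sizes and ratio are made-up assumptions.
 */
public class CompactionSelectionSketch {

    /** A candidate set qualifies if no file is larger than ratio * (sum of the others). */
    static boolean allFilesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the store file sizes reported above (~12.3 K + 11.9 K + 11.9 K = 36897 bytes).
        List<Long> candidate = List.of(12_600L, 12_150L, 12_147L);
        double ratio = 1.2; // an assumed compaction ratio for this sketch
        System.out.println("select all 3 files? " + allFilesInRatio(candidate, ratio));
    }
}

Run as-is this prints true for the three similarly sized files, which is consistent with the single-permutation, all-files selection shown in the log.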
2024-12-12T22:35:52,713 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/39377278ec054fd4842dec67bd51b90e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.0 K 2024-12-12T22:35:52,714 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39377278ec054fd4842dec67bd51b90e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042949289 2024-12-12T22:35:52,714 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 278852ead030498382a1eb7ae562d5c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1734042949917 2024-12-12T22:35:52,714 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8e2997c174426aab5551f305f5b40f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:52,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742088_1264 (size=12151) 2024-12-12T22:35:52,721 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/6f0e49f4daa94892b028ad971d5f8949 2024-12-12T22:35:52,727 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:52,727 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b383b41733f94891a119afa0e72257de is 50, key is test_row_0/C:col10/1734042952047/Put/seqid=0 2024-12-12T22:35:52,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742089_1265 (size=12697) 2024-12-12T22:35:52,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c72a74138a954e8e83c0b48b899d6905 is 50, key is test_row_0/B:col10/1734042952686/Put/seqid=0 2024-12-12T22:35:52,740 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b383b41733f94891a119afa0e72257de as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b383b41733f94891a119afa0e72257de 2024-12-12T22:35:52,745 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into b383b41733f94891a119afa0e72257de(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
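The RegionTooBusyException WARN/DEBUG pairs that dominate this stretch all come from the same place: HRegion.checkResources rejects incoming mutations while the region's memstore is above its blocking limit (512.0 K in this test) so the flusher and compactor can catch up. Below is a minimal sketch of that shape of check; it is not the actual HRegion code, the RegionTooBusy class is a local stand-in, and the 512 K constant is simply copied from the log.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Minimal sketch (not the real HRegion.checkResources) of the blocking check the
 * log shows: while the region's memstore is above its blocking size, incoming
 * mutations are rejected so flushes can catch up. The region name and the 512 K
 * limit are taken from the log; everything else is illustrative.
 */
public class MemStoreBlockingSketch {

    static class RegionTooBusy extends IOException {
        RegionTooBusy(String msg) { super(msg); }
    }

    private final AtomicLong memStoreDataSize = new AtomicLong();
    private final long blockingMemStoreSize = 512 * 1024; // 512.0 K, as in the log

    /** Called before applying a put; mirrors the "Over memstore limit" rejection. */
    void checkResources(String regionName) throws RegionTooBusy {
        if (memStoreDataSize.get() > blockingMemStoreSize) {
            throw new RegionTooBusy("Over memstore limit=512.0 K, regionName=" + regionName);
        }
    }

    void put(String regionName, int cellBytes) throws RegionTooBusy {
        checkResources(regionName);
        memStoreDataSize.addAndGet(cellBytes);
    }

    /** A completed flush drains the memstore and lets blocked writers proceed again. */
    void flushed() {
        memStoreDataSize.set(0);
    }

    public static void main(String[] args) {
        MemStoreBlockingSketch region = new MemStoreBlockingSketch();
        region.memStoreDataSize.set(600 * 1024); // pretend writers got ahead of the flusher
        try {
            region.put("3e6ba90564d3642fab3e7bc05bfeebf6", 100);
        } catch (RegionTooBusy e) {
            System.out.println("rejected: " + e.getMessage()); // what the handler threads log above
        }
    }
}

In a normal deployment this blocking size is governed by hbase.hregion.memstore.flush.size together with hbase.hregion.memstore.block.multiplier; the tiny 512 K limit here is presumably a test override chosen to make this rejection path easy to hit.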
2024-12-12T22:35:52,745 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:52,745 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042952663; duration=0sec 2024-12-12T22:35:52,745 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:52,745 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:52,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742090_1266 (size=12151) 2024-12-12T22:35:52,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c72a74138a954e8e83c0b48b899d6905 2024-12-12T22:35:52,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/c54511e9ccad4c3eac6021e4edf1f8b9 is 50, key is test_row_0/C:col10/1734042952686/Put/seqid=0 2024-12-12T22:35:52,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742091_1267 (size=12151) 2024-12-12T22:35:52,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/c54511e9ccad4c3eac6021e4edf1f8b9 2024-12-12T22:35:52,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/6f0e49f4daa94892b028ad971d5f8949 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949 2024-12-12T22:35:52,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043012807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043012807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043012807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043012807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:52,813 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949, entries=150, sequenceid=255, filesize=11.9 K 2024-12-12T22:35:52,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c72a74138a954e8e83c0b48b899d6905 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905 2024-12-12T22:35:52,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905, entries=150, sequenceid=255, filesize=11.9 K 2024-12-12T22:35:52,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/c54511e9ccad4c3eac6021e4edf1f8b9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9 2024-12-12T22:35:52,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9, entries=150, sequenceid=255, filesize=11.9 K 2024-12-12T22:35:52,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 137ms, sequenceid=255, 
compaction requested=false 2024-12-12T22:35:52,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:53,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:53,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:53,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:53,029 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/34c8b5d450064205a3c1af810348834f is 50, key is test_row_0/A:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:53,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742092_1268 (size=12301) 2024-12-12T22:35:53,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/34c8b5d450064205a3c1af810348834f 2024-12-12T22:35:53,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043013040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043013041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/68ab77f80f01405184099ce897197593 is 50, key is test_row_0/B:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:53,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043013045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043013045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742093_1269 (size=12301) 2024-12-12T22:35:53,119 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/a542453e3b2644aa9154d6ff32391126 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a542453e3b2644aa9154d6ff32391126 2024-12-12T22:35:53,125 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into a542453e3b2644aa9154d6ff32391126(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:53,125 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:53,125 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042952662; duration=0sec 2024-12-12T22:35:53,125 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:53,125 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:53,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043013146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043013151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043013151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043013156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043013213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,350 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043013350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043013356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043013356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043013369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,474 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/68ab77f80f01405184099ce897197593 2024-12-12T22:35:53,485 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 is 50, key is test_row_0/C:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:53,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742094_1270 (size=12301) 2024-12-12T22:35:53,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-12T22:35:53,579 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-12T22:35:53,582 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:53,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees 2024-12-12T22:35:53,584 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:53,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T22:35:53,584 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=63, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:53,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
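The pid=61/63/64 entries trace a client-requested flush through the master: HMaster receives the flush RPC for TestAcidGuarantees, stores a FlushTableProcedure (pid=63), and fans it out as a per-region FlushRegionProcedure (pid=64). A minimal client-side sketch of issuing such a flush is below; it assumes an hbase-site.xml on the classpath pointing at the cluster, and the class name is made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Sketch of the client call behind the FLUSH operation logged above
 * (Operation: FLUSH, Table Name: default:TestAcidGuarantees). Assumes an
 * hbase-site.xml on the classpath pointing at the test cluster.
 */
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // The master executes this as FlushTableProcedure -> per-region
            // FlushRegionProcedure, which is exactly what the pid=63/64 entries show.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The entries that follow show the region-server side of this request: the FlushRegionCallable for pid=64 declines with "NOT flushing ... as already flushing" and surfaces an IOException, because a memstore flush for this region is still in progress.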
2024-12-12T22:35:53,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043013653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,661 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043013659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043013659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:53,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043013671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T22:35:53,736 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T22:35:53,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:53,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:53,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:53,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:53,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:53,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:53,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T22:35:53,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:53,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T22:35:53,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:53,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:53,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:53,890 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] handler.RSProcedureHandler(58): pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:53,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=64 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:53,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=64 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:53,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=270 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 2024-12-12T22:35:53,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/34c8b5d450064205a3c1af810348834f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f 2024-12-12T22:35:53,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T22:35:53,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/68ab77f80f01405184099ce897197593 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593 2024-12-12T22:35:53,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T22:35:53,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 2024-12-12T22:35:53,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4, entries=150, sequenceid=270, filesize=12.0 K 2024-12-12T22:35:53,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 939ms, sequenceid=270, compaction requested=true 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:53,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:53,952 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:53,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:53,954 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:53,954 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:53,954 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a542453e3b2644aa9154d6ff32391126, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.3 K 2024-12-12T22:35:53,954 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8205cd8d516c4f6daf88b82b74766076, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.3 K 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8205cd8d516c4f6daf88b82b74766076, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:53,954 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a542453e3b2644aa9154d6ff32391126, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:53,955 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c72a74138a954e8e83c0b48b899d6905, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1734042952071 2024-12-12T22:35:53,955 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 68ab77f80f01405184099ce897197593, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:53,955 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f0e49f4daa94892b028ad971d5f8949, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1734042952071 2024-12-12T22:35:53,955 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34c8b5d450064205a3c1af810348834f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:53,967 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:53,968 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/066689afb5e743199f45f589b36beeb9 is 50, key is test_row_0/B:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:53,978 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:53,978 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/32addceec6a646779693e5b7dd935e3b is 50, key is test_row_0/A:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:54,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742096_1272 (size=12949) 2024-12-12T22:35:54,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742095_1271 (size=12949) 2024-12-12T22:35:54,021 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/066689afb5e743199f45f589b36beeb9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/066689afb5e743199f45f589b36beeb9 2024-12-12T22:35:54,030 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into 066689afb5e743199f45f589b36beeb9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:54,030 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:54,030 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042953952; duration=0sec 2024-12-12T22:35:54,030 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:54,030 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:54,031 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:54,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:54,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:54,032 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,032 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b383b41733f94891a119afa0e72257de, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.3 K 2024-12-12T22:35:54,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b383b41733f94891a119afa0e72257de, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1734042952042 2024-12-12T22:35:54,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c54511e9ccad4c3eac6021e4edf1f8b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1734042952071 2024-12-12T22:35:54,033 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f9b49a6c5dc4658ad83eb1b17faa2c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:54,043 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=64 2024-12-12T22:35:54,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,044 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:35:54,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:54,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,051 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#227 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:54,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/3bf2f0f77f274fc8bce799935da140cb is 50, key is test_row_0/A:col10/1734042953043/Put/seqid=0 2024-12-12T22:35:54,056 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/88d5a8c37210462ea1a78302379beb3b is 50, key is test_row_0/C:col10/1734042953011/Put/seqid=0 2024-12-12T22:35:54,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742098_1274 (size=12949) 2024-12-12T22:35:54,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742097_1273 (size=12301) 2024-12-12T22:35:54,094 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/3bf2f0f77f274fc8bce799935da140cb 2024-12-12T22:35:54,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4ffb665ff3454e0aa730b780600368f6 is 50, key is test_row_0/B:col10/1734042953043/Put/seqid=0 2024-12-12T22:35:54,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742099_1275 (size=12301) 2024-12-12T22:35:54,128 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4ffb665ff3454e0aa730b780600368f6 2024-12-12T22:35:54,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/d68f187126cb4eac9380c1c4fda0750c is 50, key is test_row_0/C:col10/1734042953043/Put/seqid=0 2024-12-12T22:35:54,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742100_1276 (size=12301) 2024-12-12T22:35:54,162 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=296 (bloomFilter=true), 
to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/d68f187126cb4eac9380c1c4fda0750c 2024-12-12T22:35:54,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:54,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:54,175 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/3bf2f0f77f274fc8bce799935da140cb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb 2024-12-12T22:35:54,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043014182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043014182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,187 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb, entries=150, sequenceid=296, filesize=12.0 K 2024-12-12T22:35:54,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T22:35:54,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4ffb665ff3454e0aa730b780600368f6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6 2024-12-12T22:35:54,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043014187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043014187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,195 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6, entries=150, sequenceid=296, filesize=12.0 K 2024-12-12T22:35:54,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/d68f187126cb4eac9380c1c4fda0750c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c 2024-12-12T22:35:54,206 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c, entries=150, sequenceid=296, filesize=12.0 K 2024-12-12T22:35:54,207 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 163ms, sequenceid=296, compaction requested=false 2024-12-12T22:35:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-12-12T22:35:54,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=64 2024-12-12T22:35:54,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-12T22:35:54,210 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 624 msec 2024-12-12T22:35:54,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=63, table=TestAcidGuarantees in 628 msec 2024-12-12T22:35:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:54,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:54,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:54,223 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/97e07d7bc729483a84092aa62fbaea4e is 50, key is test_row_0/A:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:54,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742101_1277 (size=12301) 2024-12-12T22:35:54,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043014288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043014288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043014289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043014292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043014292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043014393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,416 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/32addceec6a646779693e5b7dd935e3b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/32addceec6a646779693e5b7dd935e3b 2024-12-12T22:35:54,429 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into 32addceec6a646779693e5b7dd935e3b(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:54,429 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:54,429 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042953952; duration=0sec 2024-12-12T22:35:54,429 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:54,429 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:54,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043014491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043014491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043014495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043014496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,500 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/88d5a8c37210462ea1a78302379beb3b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/88d5a8c37210462ea1a78302379beb3b 2024-12-12T22:35:54,511 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 88d5a8c37210462ea1a78302379beb3b(size=12.6 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:54,511 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:54,511 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042953952; duration=0sec 2024-12-12T22:35:54,511 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:54,511 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:54,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043014598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/97e07d7bc729483a84092aa62fbaea4e 2024-12-12T22:35:54,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/495686f980824b9c8f8143ce0fb10997 is 50, key is test_row_0/B:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:54,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742102_1278 (size=12301) 2024-12-12T22:35:54,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/495686f980824b9c8f8143ce0fb10997 2024-12-12T22:35:54,661 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7014cc67b0784f7fa8213f4c698d98d0 is 50, key is test_row_0/C:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:54,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742103_1279 (size=12301) 2024-12-12T22:35:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-12T22:35:54,689 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-12T22:35:54,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:54,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees 2024-12-12T22:35:54,693 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:54,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T22:35:54,694 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=65, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:54,694 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T22:35:54,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043014794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043014794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043014799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043014800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T22:35:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,847 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:54,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:54,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:54,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:54,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043014902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T22:35:54,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:54,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T22:35:54,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:54,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:54,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] handler.RSProcedureHandler(58): pid=66 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
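The repeated RegionTooBusyException warnings above show client Mutate calls being rejected while the region flush is still in progress: HRegion.checkResources blocks new writes once the region's memstore passes its blocking threshold, reported here as 512.0 K. As a hedged illustration (not HBase source), the sketch below shows how that threshold is conventionally derived from the standard configuration keys; the tiny value in this log suggests the test lowers hbase.hregion.memstore.flush.size far below the 128 MB default. The helper computeBlockingLimit and the main method are illustrative names only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hedged sketch: approximates how the memstore "blocking" limit seen in the
// RegionTooBusyException messages is derived from standard configuration keys.
// computeBlockingLimit is an illustrative helper, not an HBase API.
public final class MemStoreLimitSketch {

    static long computeBlockingLimit(Configuration conf) {
        // hbase.hregion.memstore.flush.size defaults to 128 MB; the test appears
        // to shrink it so that flushes and write-blocking happen quickly.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        // hbase.hregion.memstore.block.multiplier defaults to 4.
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        return flushSize * multiplier;
    }

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test setting: flush.size = 128 KB. With the default multiplier
        // of 4 this yields 512 KB, matching "Over memstore limit=512.0 K" above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        System.out.println("blocking limit = " + computeBlockingLimit(conf) + " bytes");
    }
}
```

Clients normally do not handle this exception themselves; the HBase client treats RegionTooBusyException as retryable and backs off until its configured retry budget is exhausted, which is why the same handlers keep reissuing the mutations seen above.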
2024-12-12T22:35:55,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=66 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:55,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=66 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:55,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=310 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7014cc67b0784f7fa8213f4c698d98d0 2024-12-12T22:35:55,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/97e07d7bc729483a84092aa62fbaea4e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e 2024-12-12T22:35:55,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T22:35:55,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/495686f980824b9c8f8143ce0fb10997 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997 2024-12-12T22:35:55,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T22:35:55,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/7014cc67b0784f7fa8213f4c698d98d0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0 2024-12-12T22:35:55,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0, entries=150, sequenceid=310, filesize=12.0 K 2024-12-12T22:35:55,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 880ms, sequenceid=310, compaction requested=true 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,097 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,097 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:55,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:55,099 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
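Once the flush completes, each store (A, B, C) is marked for compaction and the exploring policy settles on the three newest HFiles (37551 bytes in total, "1 permutations with 1 in ratio"). As a rough, hedged sketch of that "in ratio" rule: a candidate set is acceptable when no single file is larger than the compaction ratio times the combined size of the other files. The method below illustrates the rule only; it is not the actual ExploringCompactionPolicy code, and the 1.2 ratio is the usual hbase.hstore.compaction.ratio default.

```java
// Hedged illustration of the "files in ratio" rule applied when exploring
// candidate compaction selections; simplified, not the HBase implementation.
final class InRatioSketch {

    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            // No file may dwarf the rest of the selection.
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximately the three flush/compaction outputs reported above
        // (two 12.0 K files and one 12.6 K file, 37551 bytes together).
        long[] sizes = {12_301, 12_301, 12_949};
        System.out.println(filesInRatio(sizes, 1.2)); // true: compact all three
    }
}
```

Because the three files are nearly the same size, they trivially pass the check, so the whole store is rewritten into a single ~12.7 K file, as the subsequent "Completed compaction of 3 (all) file(s)" entries confirm.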
2024-12-12T22:35:55,099 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/32addceec6a646779693e5b7dd935e3b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.7 K 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:55,099 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,099 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/066689afb5e743199f45f589b36beeb9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.7 K 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 32addceec6a646779693e5b7dd935e3b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:55,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bf2f0f77f274fc8bce799935da140cb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1734042953030 2024-12-12T22:35:55,100 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 066689afb5e743199f45f589b36beeb9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:55,100 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 97e07d7bc729483a84092aa62fbaea4e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,100 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ffb665ff3454e0aa730b780600368f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1734042953030 2024-12-12T22:35:55,101 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 495686f980824b9c8f8143ce0fb10997, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,110 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,111 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/da608df54f2d4a3093af6c323cbe9883 is 50, key is test_row_0/A:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:55,114 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,117 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c723f83cb5a44306a671f7b2263f4ecb is 50, key is test_row_0/B:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:55,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742104_1280 (size=13051) 2024-12-12T22:35:55,141 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/da608df54f2d4a3093af6c323cbe9883 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da608df54f2d4a3093af6c323cbe9883 2024-12-12T22:35:55,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742105_1281 (size=13051) 2024-12-12T22:35:55,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=66 2024-12-12T22:35:55,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
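The FlushRegionCallable attempts in this stretch are the region-server side of the table-level flush the test client requested ("Client=jenkins ... flush TestAcidGuarantees", procId 65), which the master drives as a FlushTableProcedure while the client polls "Checking to see if procedure is done" until it finishes. Below is a hedged sketch of the client side of that interaction using the public Admin API; the table name is taken from the log, connection settings and error handling are omitted, and the class name is illustrative.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hedged sketch of the client-side flush request that appears in this log as
// "Operation: FLUSH, Table Name: default:TestAcidGuarantees"; illustrative only.
public final class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // Admin.flush waits for the master-side flush procedure to finish,
            // which matches the repeated procedure-done polls in the log before
            // the operation is reported as completed.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```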
2024-12-12T22:35:55,152 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:35:55,156 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c723f83cb5a44306a671f7b2263f4ecb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c723f83cb5a44306a671f7b2263f4ecb 2024-12-12T22:35:55,158 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into da608df54f2d4a3093af6c323cbe9883(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:55,158 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,158 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042955097; duration=0sec 2024-12-12T22:35:55,159 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:55,159 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:55,159 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:55,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,162 DEBUG 
[RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,162 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:55,162 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,162 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/88d5a8c37210462ea1a78302379beb3b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.7 K 2024-12-12T22:35:55,163 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 88d5a8c37210462ea1a78302379beb3b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=270, earliestPutTs=1734042952701 2024-12-12T22:35:55,164 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d68f187126cb4eac9380c1c4fda0750c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=296, earliestPutTs=1734042953030 2024-12-12T22:35:55,164 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7014cc67b0784f7fa8213f4c698d98d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/172a5f073ef34f52b1397452c41d1558 is 50, key is test_row_0/A:col10/1734042954287/Put/seqid=0 2024-12-12T22:35:55,168 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into c723f83cb5a44306a671f7b2263f4ecb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:55,168 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,168 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042955097; duration=0sec 2024-12-12T22:35:55,168 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,168 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:55,181 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,182 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/75192e4c2b7941af92eb09ef685595c9 is 50, key is test_row_0/C:col10/1734042954217/Put/seqid=0 2024-12-12T22:35:55,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742106_1282 (size=12301) 2024-12-12T22:35:55,207 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/172a5f073ef34f52b1397452c41d1558 2024-12-12T22:35:55,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/f541e4dfd4514308a626e794f31da70f is 50, key is test_row_0/B:col10/1734042954287/Put/seqid=0 2024-12-12T22:35:55,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742107_1283 (size=13051) 2024-12-12T22:35:55,256 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/75192e4c2b7941af92eb09ef685595c9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/75192e4c2b7941af92eb09ef685595c9 2024-12-12T22:35:55,265 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 
75192e4c2b7941af92eb09ef685595c9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:55,265 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,265 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042955097; duration=0sec 2024-12-12T22:35:55,265 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,265 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742108_1284 (size=12301) 2024-12-12T22:35:55,276 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/f541e4dfd4514308a626e794f31da70f 2024-12-12T22:35:55,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b9afb4a80f904a64827150785e0b3119 is 50, key is test_row_0/C:col10/1734042954287/Put/seqid=0 2024-12-12T22:35:55,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T22:35:55,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:55,298 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:55,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742109_1285 (size=12301) 2024-12-12T22:35:55,310 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b9afb4a80f904a64827150785e0b3119 2024-12-12T22:35:55,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/172a5f073ef34f52b1397452c41d1558 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558 2024-12-12T22:35:55,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043015312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043015316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043015315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043015317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,324 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558, entries=150, sequenceid=335, filesize=12.0 K 2024-12-12T22:35:55,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/f541e4dfd4514308a626e794f31da70f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f 2024-12-12T22:35:55,334 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f, entries=150, sequenceid=335, filesize=12.0 K 2024-12-12T22:35:55,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/b9afb4a80f904a64827150785e0b3119 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119 2024-12-12T22:35:55,343 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119, entries=150, sequenceid=335, filesize=12.0 K 2024-12-12T22:35:55,344 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 192ms, sequenceid=335, compaction requested=false 2024-12-12T22:35:55,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=66}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=66 2024-12-12T22:35:55,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=66 2024-12-12T22:35:55,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-12T22:35:55,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 652 msec 2024-12-12T22:35:55,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=65, table=TestAcidGuarantees in 657 msec 2024-12-12T22:35:55,414 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:55,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:55,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/52e07e874f394f8bb28f543f315740cf is 50, key is test_row_0/A:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,445 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043015441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043015442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043015443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043015445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043015445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742110_1286 (size=14741) 2024-12-12T22:35:55,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/52e07e874f394f8bb28f543f315740cf 2024-12-12T22:35:55,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/04919cedd02f438d9688a507abce14e8 is 50, key is test_row_0/B:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742111_1287 (size=12301) 2024-12-12T22:35:55,538 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/04919cedd02f438d9688a507abce14e8 2024-12-12T22:35:55,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043015548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,552 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043015549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043015549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,556 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,556 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043015554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043015554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,558 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/3ae6763164824df394c4123aaa943a0f is 50, key is test_row_0/C:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742112_1288 (size=12301) 2024-12-12T22:35:55,565 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=353 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/3ae6763164824df394c4123aaa943a0f 2024-12-12T22:35:55,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/52e07e874f394f8bb28f543f315740cf as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf 2024-12-12T22:35:55,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf, entries=200, sequenceid=353, filesize=14.4 K 2024-12-12T22:35:55,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/04919cedd02f438d9688a507abce14e8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8 2024-12-12T22:35:55,587 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8, entries=150, sequenceid=353, filesize=12.0 K 2024-12-12T22:35:55,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/3ae6763164824df394c4123aaa943a0f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f 2024-12-12T22:35:55,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f, entries=150, sequenceid=353, filesize=12.0 K 2024-12-12T22:35:55,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 191ms, sequenceid=353, compaction requested=true 2024-12-12T22:35:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,605 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,605 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:55,607 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,607 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:55,607 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,607 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c723f83cb5a44306a671f7b2263f4ecb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.8 K 2024-12-12T22:35:55,608 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c723f83cb5a44306a671f7b2263f4ecb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,608 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f541e4dfd4514308a626e794f31da70f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734042954268 2024-12-12T22:35:55,608 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 04919cedd02f438d9688a507abce14e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955312 2024-12-12T22:35:55,616 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40093 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,616 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:55,616 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:55,616 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da608df54f2d4a3093af6c323cbe9883, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=39.2 K 2024-12-12T22:35:55,617 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting da608df54f2d4a3093af6c323cbe9883, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,617 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 172a5f073ef34f52b1397452c41d1558, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734042954268 2024-12-12T22:35:55,618 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52e07e874f394f8bb28f543f315740cf, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955308 2024-12-12T22:35:55,625 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,626 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c is 50, key is test_row_0/B:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,657 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,658 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/334e96f82f9641808ee440630fb0ad37 is 50, key is test_row_0/A:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742113_1289 (size=13153) 2024-12-12T22:35:55,697 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c 2024-12-12T22:35:55,707 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into c6fa9c97cd7d4bbcab2ba36c8ec2355c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:55,707 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,707 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=13, startTime=1734042955605; duration=0sec 2024-12-12T22:35:55,707 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:55,708 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:35:55,708 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:35:55,709 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:35:55,709 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:35:55,709 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:55,709 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/75192e4c2b7941af92eb09ef685595c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=36.8 K 2024-12-12T22:35:55,710 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 75192e4c2b7941af92eb09ef685595c9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=310, earliestPutTs=1734042954181 2024-12-12T22:35:55,710 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b9afb4a80f904a64827150785e0b3119, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1734042954268 2024-12-12T22:35:55,710 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ae6763164824df394c4123aaa943a0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955312 2024-12-12T22:35:55,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742114_1290 (size=13153) 2024-12-12T22:35:55,732 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/334e96f82f9641808ee440630fb0ad37 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/334e96f82f9641808ee440630fb0ad37 2024-12-12T22:35:55,741 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into 334e96f82f9641808ee440630fb0ad37(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:35:55,741 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,741 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=13, startTime=1734042955605; duration=0sec 2024-12-12T22:35:55,741 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,741 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:35:55,753 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#245 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:55,753 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/95b3dc8d042e4e97b8606e7c6a8d0af3 is 50, key is test_row_0/C:col10/1734042955412/Put/seqid=0 2024-12-12T22:35:55,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T22:35:55,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:55,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:55,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/7be9a907fb0d48bfa608ed13db21fcc6 is 50, key is test_row_0/A:col10/1734042955754/Put/seqid=0 2024-12-12T22:35:55,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043015781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,790 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043015783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043015784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043015789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,794 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043015791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-12-12T22:35:55,797 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-12-12T22:35:55,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:55,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-12T22:35:55,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:55,805 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:55,807 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:55,807 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:55,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742115_1291 (size=13153) 2024-12-12T22:35:55,819 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/95b3dc8d042e4e97b8606e7c6a8d0af3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95b3dc8d042e4e97b8606e7c6a8d0af3 2024-12-12T22:35:55,834 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 
95b3dc8d042e4e97b8606e7c6a8d0af3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:35:55,834 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:55,834 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=13, startTime=1734042955606; duration=0sec 2024-12-12T22:35:55,834 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:55,834 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:35:55,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742116_1292 (size=12301) 2024-12-12T22:35:55,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043015892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043015892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043015893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043015895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043015895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:55,960 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:55,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:55,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:55,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:55,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:55,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:55,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043016103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043016103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043016103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043016103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043016103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,113 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,114 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:56,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,240 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/7be9a907fb0d48bfa608ed13db21fcc6 2024-12-12T22:35:56,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,270 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:56,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4d6f6759394844d2a09b4a40e6aaca44 is 50, key is test_row_0/B:col10/1734042955754/Put/seqid=0 2024-12-12T22:35:56,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742117_1293 (size=12301) 2024-12-12T22:35:56,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:56,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043016408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043016408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,411 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043016408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,412 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043016410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043016410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,425 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:56,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,577 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,583 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:56,710 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4d6f6759394844d2a09b4a40e6aaca44 2024-12-12T22:35:56,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6b31d6caecdf438a95b13b42af34c294 is 50, key is test_row_0/C:col10/1734042955754/Put/seqid=0 2024-12-12T22:35:56,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,737 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:35:56,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:35:56,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742118_1294 (size=12301) 2024-12-12T22:35:56,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6b31d6caecdf438a95b13b42af34c294 2024-12-12T22:35:56,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/7be9a907fb0d48bfa608ed13db21fcc6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6 2024-12-12T22:35:56,795 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T22:35:56,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/4d6f6759394844d2a09b4a40e6aaca44 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44 2024-12-12T22:35:56,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T22:35:56,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/6b31d6caecdf438a95b13b42af34c294 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294 2024-12-12T22:35:56,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294, entries=150, sequenceid=376, filesize=12.0 K 2024-12-12T22:35:56,820 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1062ms, sequenceid=376, compaction requested=false 2024-12-12T22:35:56,821 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:56,891 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,893 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-12T22:35:56,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:56,893 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:56,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:56,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/45d9a4ff02af445c8c5a9d3e78c81731 is 50, key is test_row_0/A:col10/1734042955778/Put/seqid=0 2024-12-12T22:35:56,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:56,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:56,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:35:56,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742119_1295 (size=12301) 2024-12-12T22:35:56,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043016940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043016945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043016945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043016945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:56,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:56,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043016946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043017049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043017050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043017050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043017050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,055 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043017054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043017253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043017253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043017253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043017255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043017260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,323 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/45d9a4ff02af445c8c5a9d3e78c81731 2024-12-12T22:35:57,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/5a36ddf649da499e8c3bb6a6da35e3b6 is 50, key is test_row_0/B:col10/1734042955778/Put/seqid=0 2024-12-12T22:35:57,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742120_1296 (size=12301) 2024-12-12T22:35:57,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043017557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043017560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043017560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043017560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:57,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043017565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:57,735 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/5a36ddf649da499e8c3bb6a6da35e3b6 2024-12-12T22:35:57,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b is 50, key is test_row_0/C:col10/1734042955778/Put/seqid=0 2024-12-12T22:35:57,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742121_1297 (size=12301) 2024-12-12T22:35:57,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:58,062 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:58,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043018061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:58,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:58,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043018062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:58,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043018062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:58,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:58,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043018066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:58,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:58,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043018067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:58,149 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b 2024-12-12T22:35:58,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/45d9a4ff02af445c8c5a9d3e78c81731 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731 2024-12-12T22:35:58,158 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731, entries=150, sequenceid=393, filesize=12.0 K 2024-12-12T22:35:58,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/5a36ddf649da499e8c3bb6a6da35e3b6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6 2024-12-12T22:35:58,164 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6, entries=150, sequenceid=393, filesize=12.0 K 2024-12-12T22:35:58,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b 2024-12-12T22:35:58,168 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b, entries=150, sequenceid=393, filesize=12.0 K 2024-12-12T22:35:58,169 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1276ms, sequenceid=393, compaction requested=true 2024-12-12T22:35:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-12T22:35:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-12T22:35:58,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-12T22:35:58,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3650 sec 2024-12-12T22:35:58,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 2.3740 sec 2024-12-12T22:35:59,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:35:59,068 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:35:59,069 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:35:59,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5670fb993042444286401a59f37a82c0 is 50, key is test_row_0/A:col10/1734042956944/Put/seqid=0 2024-12-12T22:35:59,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742122_1298 (size=14741) 2024-12-12T22:35:59,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043019078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043019079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043019080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043019081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,084 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043019081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043019184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043019184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043019185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043019187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043019387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043019388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043019390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043019392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,477 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5670fb993042444286401a59f37a82c0 2024-12-12T22:35:59,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/a762f269556e4d368b2571a36782070b is 50, key is test_row_0/B:col10/1734042956944/Put/seqid=0 2024-12-12T22:35:59,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742123_1299 (size=12301) 2024-12-12T22:35:59,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/a762f269556e4d368b2571a36782070b 2024-12-12T22:35:59,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/8be24623b1484ee38ce0a7934f5cb891 is 50, key is test_row_0/C:col10/1734042956944/Put/seqid=0 2024-12-12T22:35:59,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742124_1300 (size=12301) 2024-12-12T22:35:59,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043019690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043019691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043019693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:35:59,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043019693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:35:59,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/8be24623b1484ee38ce0a7934f5cb891 2024-12-12T22:35:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-12T22:35:59,910 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-12T22:35:59,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:35:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-12T22:35:59,919 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:35:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T22:35:59,920 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:35:59,920 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:35:59,923 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/5670fb993042444286401a59f37a82c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0 2024-12-12T22:35:59,931 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0, entries=200, sequenceid=415, filesize=14.4 K 2024-12-12T22:35:59,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/a762f269556e4d368b2571a36782070b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b 2024-12-12T22:35:59,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b, entries=150, sequenceid=415, filesize=12.0 K 2024-12-12T22:35:59,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/8be24623b1484ee38ce0a7934f5cb891 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891 2024-12-12T22:35:59,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891, entries=150, sequenceid=415, filesize=12.0 K 2024-12-12T22:35:59,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 884ms, sequenceid=415, compaction requested=true 2024-12-12T22:35:59,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:35:59,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:59,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:35:59,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:35:59,953 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:35:59,953 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:35:59,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-12T22:35:59,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3e6ba90564d3642fab3e7bc05bfeebf6:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:35:59,954 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:35:59,956 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:59,956 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/B is initiating minor compaction (all files) 2024-12-12T22:35:59,956 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/B in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:35:59,956 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=48.9 K 2024-12-12T22:35:59,956 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52496 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:35:59,957 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/A is initiating minor compaction (all files) 2024-12-12T22:35:59,957 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/A in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:35:59,957 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/334e96f82f9641808ee440630fb0ad37, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=51.3 K 2024-12-12T22:35:59,957 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c6fa9c97cd7d4bbcab2ba36c8ec2355c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955312 2024-12-12T22:35:59,959 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 334e96f82f9641808ee440630fb0ad37, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955312 2024-12-12T22:35:59,960 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d6f6759394844d2a09b4a40e6aaca44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734042955440 2024-12-12T22:35:59,960 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7be9a907fb0d48bfa608ed13db21fcc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734042955440 2024-12-12T22:35:59,960 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a36ddf649da499e8c3bb6a6da35e3b6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1734042955778 2024-12-12T22:35:59,961 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a762f269556e4d368b2571a36782070b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734042956936 2024-12-12T22:35:59,961 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45d9a4ff02af445c8c5a9d3e78c81731, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1734042955778 2024-12-12T22:35:59,962 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5670fb993042444286401a59f37a82c0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734042956936 2024-12-12T22:35:59,984 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#B#compaction#255 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:35:59,985 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/45e7c04b4a4f45889d1694b0aefc8de3 is 50, key is test_row_0/B:col10/1734042956944/Put/seqid=0 2024-12-12T22:36:00,002 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#A#compaction#256 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:00,003 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/2ae702588bdb45ef924554ee680c3e87 is 50, key is test_row_0/A:col10/1734042956944/Put/seqid=0 2024-12-12T22:36:00,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742125_1301 (size=13289) 2024-12-12T22:36:00,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T22:36:00,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742126_1302 (size=13289) 2024-12-12T22:36:00,039 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/2ae702588bdb45ef924554ee680c3e87 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2ae702588bdb45ef924554ee680c3e87 2024-12-12T22:36:00,049 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/A of 3e6ba90564d3642fab3e7bc05bfeebf6 into 2ae702588bdb45ef924554ee680c3e87(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:00,049 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:36:00,049 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/A, priority=12, startTime=1734042959952; duration=0sec 2024-12-12T22:36:00,049 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:00,049 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:A 2024-12-12T22:36:00,049 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:00,051 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:00,051 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 3e6ba90564d3642fab3e7bc05bfeebf6/C is initiating minor compaction (all files) 2024-12-12T22:36:00,051 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3e6ba90564d3642fab3e7bc05bfeebf6/C in TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:36:00,051 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95b3dc8d042e4e97b8606e7c6a8d0af3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp, totalSize=48.9 K 2024-12-12T22:36:00,051 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95b3dc8d042e4e97b8606e7c6a8d0af3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=353, earliestPutTs=1734042955312 2024-12-12T22:36:00,052 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b31d6caecdf438a95b13b42af34c294, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1734042955440 2024-12-12T22:36:00,052 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfbb3eaffa554c3bb80a7b0cb6f7b00b, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1734042955778 2024-12-12T22:36:00,052 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8be24623b1484ee38ce0a7934f5cb891, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734042956936 2024-12-12T22:36:00,062 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3e6ba90564d3642fab3e7bc05bfeebf6#C#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:00,062 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/95c5bf515f1a40bc8a282d3c1d07987f is 50, key is test_row_0/C:col10/1734042956944/Put/seqid=0 2024-12-12T22:36:00,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742127_1303 (size=13289) 2024-12-12T22:36:00,071 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-12T22:36:00,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:36:00,071 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:36:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:00,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/411f643023504c5c80a2f10e3de849c0 is 50, key is test_row_0/A:col10/1734042959078/Put/seqid=0 2024-12-12T22:36:00,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742128_1304 (size=12301) 2024-12-12T22:36:00,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:36:00,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:00,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T22:36:00,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043020218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,223 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043020220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043020221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043020222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043020323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043020324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043020326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,328 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043020326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,423 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/45e7c04b4a4f45889d1694b0aefc8de3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/45e7c04b4a4f45889d1694b0aefc8de3 2024-12-12T22:36:00,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/B of 3e6ba90564d3642fab3e7bc05bfeebf6 into 45e7c04b4a4f45889d1694b0aefc8de3(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:00,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:36:00,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/B, priority=12, startTime=1734042959953; duration=0sec 2024-12-12T22:36:00,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:00,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:B 2024-12-12T22:36:00,482 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/95c5bf515f1a40bc8a282d3c1d07987f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95c5bf515f1a40bc8a282d3c1d07987f 2024-12-12T22:36:00,483 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/411f643023504c5c80a2f10e3de849c0 2024-12-12T22:36:00,502 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3e6ba90564d3642fab3e7bc05bfeebf6/C of 3e6ba90564d3642fab3e7bc05bfeebf6 into 95c5bf515f1a40bc8a282d3c1d07987f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:00,502 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:36:00,502 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., storeName=3e6ba90564d3642fab3e7bc05bfeebf6/C, priority=12, startTime=1734042959954; duration=0sec 2024-12-12T22:36:00,502 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:00,502 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3e6ba90564d3642fab3e7bc05bfeebf6:C 2024-12-12T22:36:00,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/3d70f1d9f294447e803c40e6a63a8f2a is 50, key is test_row_0/B:col10/1734042959078/Put/seqid=0 2024-12-12T22:36:00,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742129_1305 (size=12301) 2024-12-12T22:36:00,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T22:36:00,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043020526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043020527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043020529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043020530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043020829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043020831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,833 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043020833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:00,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043020833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:00,918 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/3d70f1d9f294447e803c40e6a63a8f2a 2024-12-12T22:36:00,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/eec5daa95c2b4e8782d1247dff534beb is 50, key is test_row_0/C:col10/1734042959078/Put/seqid=0 2024-12-12T22:36:00,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742130_1306 (size=12301) 2024-12-12T22:36:01,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-12T22:36:01,088 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:01,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43256 deadline: 1734043021087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:01,089 DEBUG [Thread-1010 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:01,231 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T22:36:01,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43338 deadline: 1734043021333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:01,335 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:01,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43332 deadline: 1734043021334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:01,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43288 deadline: 1734043021338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:01,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:01,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43302 deadline: 1734043021338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:01,373 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/eec5daa95c2b4e8782d1247dff534beb 2024-12-12T22:36:01,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/411f643023504c5c80a2f10e3de849c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/411f643023504c5c80a2f10e3de849c0 2024-12-12T22:36:01,389 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/411f643023504c5c80a2f10e3de849c0, entries=150, sequenceid=431, filesize=12.0 K 2024-12-12T22:36:01,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/3d70f1d9f294447e803c40e6a63a8f2a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/3d70f1d9f294447e803c40e6a63a8f2a 2024-12-12T22:36:01,395 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/3d70f1d9f294447e803c40e6a63a8f2a, entries=150, sequenceid=431, filesize=12.0 K
2024-12-12T22:36:01,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/eec5daa95c2b4e8782d1247dff534beb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/eec5daa95c2b4e8782d1247dff534beb
2024-12-12T22:36:01,402 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/eec5daa95c2b4e8782d1247dff534beb, entries=150, sequenceid=431, filesize=12.0 K
2024-12-12T22:36:01,403 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1332ms, sequenceid=431, compaction requested=false
2024-12-12T22:36:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6:
2024-12-12T22:36:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.
2024-12-12T22:36:01,403 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70
2024-12-12T22:36:01,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=70
2024-12-12T22:36:01,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69
2024-12-12T22:36:01,405 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4840 sec
2024-12-12T22:36:01,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.4900 sec
2024-12-12T22:36:02,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69
2024-12-12T22:36:02,024 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed
2024-12-12T22:36:02,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-12T22:36:02,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees
2024-12-12T22:36:02,026 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-12T22:36:02,026 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-12T22:36:02,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-12T22:36:02,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-12T22:36:02,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-12T22:36:02,148 DEBUG [Thread-1019 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:50645
2024-12-12T22:36:02,149 DEBUG [Thread-1019 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:36:02,149 DEBUG [Thread-1023 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ad882f to 127.0.0.1:50645
2024-12-12T22:36:02,149 DEBUG [Thread-1023 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:36:02,150 DEBUG [Thread-1015 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:50645
2024-12-12T22:36:02,150 DEBUG [Thread-1015 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:36:02,150 DEBUG [Thread-1021 {}] zookeeper.ReadOnlyZKClient(407): Close
zookeeper connection 0x598cfed4 to 127.0.0.1:50645 2024-12-12T22:36:02,150 DEBUG [Thread-1017 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:50645 2024-12-12T22:36:02,150 DEBUG [Thread-1021 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,150 DEBUG [Thread-1017 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:02,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:36:02,179 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:36:02,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/629b52abe8c1477cb63a36746d8c511b is 50, key is test_row_0/A:col10/1734042960215/Put/seqid=0 2024-12-12T22:36:02,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742131_1307 (size=12301) 2024-12-12T22:36:02,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T22:36:02,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:02,338 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. as already flushing 2024-12-12T22:36:02,338 DEBUG [Thread-1004 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:50645 2024-12-12T22:36:02,338 DEBUG [Thread-1004 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,339 DEBUG [Thread-1006 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:50645 2024-12-12T22:36:02,339 DEBUG [Thread-1006 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,342 DEBUG [Thread-1008 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:50645 2024-12-12T22:36:02,342 DEBUG [Thread-1008 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,351 DEBUG [Thread-1012 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:50645 2024-12-12T22:36:02,351 DEBUG [Thread-1012 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:02,587 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/629b52abe8c1477cb63a36746d8c511b 2024-12-12T22:36:02,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/840d0ff146234158ab618d43e72843a3 is 50, key is test_row_0/B:col10/1734042960215/Put/seqid=0 2024-12-12T22:36:02,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742132_1308 (size=12301) 2024-12-12T22:36:02,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T22:36:02,998 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/840d0ff146234158ab618d43e72843a3 2024-12-12T22:36:03,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/ceff644106f6477daeec607295300557 is 50, key is test_row_0/C:col10/1734042960215/Put/seqid=0 2024-12-12T22:36:03,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742133_1309 (size=12301) 2024-12-12T22:36:03,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-12T22:36:03,409 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/ceff644106f6477daeec607295300557 2024-12-12T22:36:03,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/629b52abe8c1477cb63a36746d8c511b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/629b52abe8c1477cb63a36746d8c511b 2024-12-12T22:36:03,417 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/629b52abe8c1477cb63a36746d8c511b, entries=150, sequenceid=455, filesize=12.0 K 2024-12-12T22:36:03,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/840d0ff146234158ab618d43e72843a3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/840d0ff146234158ab618d43e72843a3 2024-12-12T22:36:03,421 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/840d0ff146234158ab618d43e72843a3, entries=150, sequenceid=455, filesize=12.0 K 2024-12-12T22:36:03,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/ceff644106f6477daeec607295300557 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/ceff644106f6477daeec607295300557 2024-12-12T22:36:03,425 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/ceff644106f6477daeec607295300557, entries=150, sequenceid=455, filesize=12.0 K 2024-12-12T22:36:03,426 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=26.84 KB/27480 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1247ms, sequenceid=455, compaction requested=true 2024-12-12T22:36:03,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 3e6ba90564d3642fab3e7bc05bfeebf6:
2024-12-12T22:36:03,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.
2024-12-12T22:36:03,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72
2024-12-12T22:36:03,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=72
2024-12-12T22:36:03,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71
2024-12-12T22:36:03,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4010 sec
2024-12-12T22:36:03,429 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.4040 sec
2024-12-12T22:36:04,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71
2024-12-12T22:36:04,130 INFO [Thread-1014 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed
2024-12-12T22:36:05,111 DEBUG [Thread-1010 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:50645
2024-12-12T22:36:05,111 DEBUG [Thread-1010 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5603
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5550
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5279
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5601
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5562
2024-12-12T22:36:05,111 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-12T22:36:05,111 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-12T22:36:05,111 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:50645
2024-12-12T22:36:05,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-12T22:36:05,112 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-12T22:36:05,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-12T22:36:05,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-12T22:36:05,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-12T22:36:05,114 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042965114"}]},"ts":"1734042965114"}
2024-12-12T22:36:05,115 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-12-12T22:36:05,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-12T22:36:05,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-12T22:36:05,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-12T22:36:06,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-12T22:36:06,557 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-169504562_22 at /127.0.0.1:45212 [Receiving block BP-1705391202-172.17.0.2-1734042868369:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 599ms
(threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/dfs/data/data2/, blockId=1073741830, seqno=1033 2024-12-12T22:36:06,559 INFO [AsyncFSWAL-0-hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData-prefix:1aef280cf0a8,35059,1734042872477 {}] wal.AbstractFSWAL(1183): Slow sync cost: 600 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:43795,DS-00eb3ae2-2973-4932-bbde-95bf2b7ce22d,DISK]] 2024-12-12T22:36:06,644 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:36:06,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:36:06,651 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, UNASSIGN}] 2024-12-12T22:36:06,652 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, UNASSIGN 2024-12-12T22:36:06,652 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=3e6ba90564d3642fab3e7bc05bfeebf6, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:06,653 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:36:06,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; CloseRegionProcedure 3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:06,805 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:06,806 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(124): Close 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1681): Closing 3e6ba90564d3642fab3e7bc05bfeebf6, disabling compactions & flushes 2024-12-12T22:36:06,806 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 
2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. after waiting 0 ms 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:36:06,806 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(2837): Flushing 3e6ba90564d3642fab3e7bc05bfeebf6 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=A 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=B 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:06,806 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3e6ba90564d3642fab3e7bc05bfeebf6, store=C 2024-12-12T22:36:06,807 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:06,811 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/8eee41f3261d40b8bad5a58a428309a5 is 50, key is test_row_1/A:col10/1734042962341/Put/seqid=0 2024-12-12T22:36:06,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742134_1310 (size=9857) 2024-12-12T22:36:07,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T22:36:07,220 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/8eee41f3261d40b8bad5a58a428309a5 2024-12-12T22:36:07,230 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/74faaabd5f034a55a4240713c76e5550 is 50, 
key is test_row_1/B:col10/1734042962341/Put/seqid=0 2024-12-12T22:36:07,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742135_1311 (size=9857) 2024-12-12T22:36:07,258 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/74faaabd5f034a55a4240713c76e5550 2024-12-12T22:36:07,293 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/070464fadf464642a58ef55734223bf9 is 50, key is test_row_1/C:col10/1734042962341/Put/seqid=0 2024-12-12T22:36:07,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742136_1312 (size=9857) 2024-12-12T22:36:07,738 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=463 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/070464fadf464642a58ef55734223bf9 2024-12-12T22:36:07,749 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/A/8eee41f3261d40b8bad5a58a428309a5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8eee41f3261d40b8bad5a58a428309a5 2024-12-12T22:36:07,756 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8eee41f3261d40b8bad5a58a428309a5, entries=100, sequenceid=463, filesize=9.6 K 2024-12-12T22:36:07,757 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/B/74faaabd5f034a55a4240713c76e5550 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/74faaabd5f034a55a4240713c76e5550 2024-12-12T22:36:07,772 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/74faaabd5f034a55a4240713c76e5550, entries=100, sequenceid=463, filesize=9.6 K 2024-12-12T22:36:07,773 
DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/.tmp/C/070464fadf464642a58ef55734223bf9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/070464fadf464642a58ef55734223bf9 2024-12-12T22:36:07,812 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/070464fadf464642a58ef55734223bf9, entries=100, sequenceid=463, filesize=9.6 K 2024-12-12T22:36:07,815 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 3e6ba90564d3642fab3e7bc05bfeebf6 in 1009ms, sequenceid=463, compaction requested=true 2024-12-12T22:36:07,819 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/d80c51dcd75f40cc9d94ca7bb9de21c7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eb6acbe6a78e47619735d83606635d22, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/72ba09189fa14a23b65fcfe491072a00, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/deaf1a61866140ddb3f0dc2a2ee08423, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8205cd8d516c4f6daf88b82b74766076, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/32addceec6a646779693e5b7dd935e3b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da608df54f2d4a3093af6c323cbe9883, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/334e96f82f9641808ee440630fb0ad37, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0] to archive 2024-12-12T22:36:07,827 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:36:07,847 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/f769c84061c54aadb4dfdf15e52e2f5f 2024-12-12T22:36:07,847 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/88f7277df0074efb80948396f47df68c 2024-12-12T22:36:07,847 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/69ca4d175cd141ec97194fb94398f5b3 2024-12-12T22:36:07,847 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/e980fa22fa7744ef9e0b8a1fa7ef2030 2024-12-12T22:36:07,848 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2a0a3c9297264b1d83db48043603d9bb 2024-12-12T22:36:07,848 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/d80c51dcd75f40cc9d94ca7bb9de21c7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/d80c51dcd75f40cc9d94ca7bb9de21c7 2024-12-12T22:36:07,851 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eb6acbe6a78e47619735d83606635d22 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eb6acbe6a78e47619735d83606635d22 2024-12-12T22:36:07,851 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/61999ed2480441878e75f33974ba3653 2024-12-12T22:36:07,854 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/eca1cd05b6c54f20b5fb3e836fb6723e 2024-12-12T22:36:07,854 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8205cd8d516c4f6daf88b82b74766076 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8205cd8d516c4f6daf88b82b74766076 2024-12-12T22:36:07,854 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/40fcc3e1c3174e17996d3f2b219c5115 2024-12-12T22:36:07,855 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da1249cb58b241d881a1d56bf153ba11 2024-12-12T22:36:07,855 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/871419e138d74f6b9c578d18ff38be8f 2024-12-12T22:36:07,857 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/6f0e49f4daa94892b028ad971d5f8949 2024-12-12T22:36:07,857 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/3bf2f0f77f274fc8bce799935da140cb 2024-12-12T22:36:07,857 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/34c8b5d450064205a3c1af810348834f 2024-12-12T22:36:07,857 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5750dc57bb144ddfb7180faec5523456 2024-12-12T22:36:07,858 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/32addceec6a646779693e5b7dd935e3b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/32addceec6a646779693e5b7dd935e3b 2024-12-12T22:36:07,859 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/b01f1dca8340497f9b164a94e465f15a 2024-12-12T22:36:07,859 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/72ba09189fa14a23b65fcfe491072a00 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/72ba09189fa14a23b65fcfe491072a00 2024-12-12T22:36:07,859 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/deaf1a61866140ddb3f0dc2a2ee08423 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/deaf1a61866140ddb3f0dc2a2ee08423 2024-12-12T22:36:07,860 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/172a5f073ef34f52b1397452c41d1558 2024-12-12T22:36:07,860 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/97e07d7bc729483a84092aa62fbaea4e 2024-12-12T22:36:07,860 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da608df54f2d4a3093af6c323cbe9883 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/da608df54f2d4a3093af6c323cbe9883 2024-12-12T22:36:07,862 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/52e07e874f394f8bb28f543f315740cf 2024-12-12T22:36:07,862 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/334e96f82f9641808ee440630fb0ad37 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/334e96f82f9641808ee440630fb0ad37 2024-12-12T22:36:07,862 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/45d9a4ff02af445c8c5a9d3e78c81731 2024-12-12T22:36:07,871 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/7be9a907fb0d48bfa608ed13db21fcc6 2024-12-12T22:36:07,871 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/5670fb993042444286401a59f37a82c0 2024-12-12T22:36:07,880 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d585fc07f3844346a942471def9ea155, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/49a15775173d4695a5f748597e808081, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/91cd6c252b174dae9309368e55b56ecd, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d1c7f0194ba54f9092984c838cbe2096, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a542453e3b2644aa9154d6ff32391126, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/066689afb5e743199f45f589b36beeb9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c723f83cb5a44306a671f7b2263f4ecb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b] to archive 2024-12-12T22:36:07,884 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:36:07,888 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/947e0975aaa948468ffb7c68ef7f756c 2024-12-12T22:36:07,892 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/82406327098945558f4416141f45cfa3 2024-12-12T22:36:07,892 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/80aaf6a28aba419085733fcce8224e9d 2024-12-12T22:36:07,892 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/2111dc7877eb4a0ba2b1f9096ca7c16d 2024-12-12T22:36:07,894 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/47c869fd45b54587a6aa31696feb3e82 2024-12-12T22:36:07,894 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/650caa0f18534b1fb74cfcab9f3a3e1a 2024-12-12T22:36:07,894 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/49a15775173d4695a5f748597e808081 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/49a15775173d4695a5f748597e808081 2024-12-12T22:36:07,894 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d585fc07f3844346a942471def9ea155 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d585fc07f3844346a942471def9ea155 2024-12-12T22:36:07,896 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/ba1c2d853cbb413e82fc9a7bae8e6af4 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6e1c19958784970b3d6c31cb5a3c21e 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/b10587d196ba4071a0685976a5a51033 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/81ed8e74458c4301b7b1824f2bede6f1 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/fe4205f2d7ef42ad9651ac6962f229db 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/91cd6c252b174dae9309368e55b56ecd to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/91cd6c252b174dae9309368e55b56ecd 2024-12-12T22:36:07,899 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a542453e3b2644aa9154d6ff32391126 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a542453e3b2644aa9154d6ff32391126 2024-12-12T22:36:07,900 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d1c7f0194ba54f9092984c838cbe2096 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/d1c7f0194ba54f9092984c838cbe2096 2024-12-12T22:36:07,902 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/7f21250bd5a84120b469c5f9febda6f4 2024-12-12T22:36:07,903 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/066689afb5e743199f45f589b36beeb9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/066689afb5e743199f45f589b36beeb9 2024-12-12T22:36:07,903 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c72a74138a954e8e83c0b48b899d6905 2024-12-12T22:36:07,904 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/68ab77f80f01405184099ce897197593 2024-12-12T22:36:07,904 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/f541e4dfd4514308a626e794f31da70f 2024-12-12T22:36:07,904 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/495686f980824b9c8f8143ce0fb10997 2024-12-12T22:36:07,904 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4ffb665ff3454e0aa730b780600368f6 2024-12-12T22:36:07,904 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c723f83cb5a44306a671f7b2263f4ecb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c723f83cb5a44306a671f7b2263f4ecb 2024-12-12T22:36:07,905 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/c6fa9c97cd7d4bbcab2ba36c8ec2355c 2024-12-12T22:36:07,907 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/04919cedd02f438d9688a507abce14e8 2024-12-12T22:36:07,910 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/5a36ddf649da499e8c3bb6a6da35e3b6 2024-12-12T22:36:07,910 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/a762f269556e4d368b2571a36782070b 2024-12-12T22:36:07,910 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/4d6f6759394844d2a09b4a40e6aaca44 2024-12-12T22:36:07,918 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/de6c6a87f1e74eedb6acd0a6857ff051, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/5640cfb9dda34dbfb00637f385a8f4d3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0493466700d940dd925002d6df15d66e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/39377278ec054fd4842dec67bd51b90e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b383b41733f94891a119afa0e72257de, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/88d5a8c37210462ea1a78302379beb3b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/75192e4c2b7941af92eb09ef685595c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95b3dc8d042e4e97b8606e7c6a8d0af3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891] to archive 2024-12-12T22:36:07,925 DEBUG [StoreCloser-TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:36:07,951 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6d1934fae0c4434a8afe13c82f45d7c5 2024-12-12T22:36:07,951 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dacfb4bdc3a8427aa1a5d0bd213e72b8 2024-12-12T22:36:07,951 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/259756512b4542c2847af05d99330ffb 2024-12-12T22:36:07,953 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/de6c6a87f1e74eedb6acd0a6857ff051 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/de6c6a87f1e74eedb6acd0a6857ff051 2024-12-12T22:36:07,953 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6ec21141c8e5462e87693d2ce37c2eaa 2024-12-12T22:36:07,953 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/5640cfb9dda34dbfb00637f385a8f4d3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/5640cfb9dda34dbfb00637f385a8f4d3 2024-12-12T22:36:07,953 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b73527c7f62747b4af1fe0ad65754cea 2024-12-12T22:36:07,954 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/51c9a168221c4c738df48d6422ceaef7 2024-12-12T22:36:07,959 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4ac9a4171bb24f63b73c2daf751a1494 2024-12-12T22:36:07,959 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0493466700d940dd925002d6df15d66e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0493466700d940dd925002d6df15d66e 2024-12-12T22:36:07,960 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/1e3dbadf806e421ab287ce69fc9fe99f 2024-12-12T22:36:07,960 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/39377278ec054fd4842dec67bd51b90e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/39377278ec054fd4842dec67bd51b90e 2024-12-12T22:36:07,962 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/278852ead030498382a1eb7ae562d5c0 2024-12-12T22:36:07,963 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/9fddc63d62f0424fb38447564dbac592 2024-12-12T22:36:07,963 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/0dbc0bbac7484b7896ba6ded5476b66f 2024-12-12T22:36:07,964 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b383b41733f94891a119afa0e72257de to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b383b41733f94891a119afa0e72257de 2024-12-12T22:36:07,967 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/4c8e2997c174426aab5551f305f5b40f 2024-12-12T22:36:07,967 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/c54511e9ccad4c3eac6021e4edf1f8b9 2024-12-12T22:36:07,967 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/88d5a8c37210462ea1a78302379beb3b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/88d5a8c37210462ea1a78302379beb3b 2024-12-12T22:36:07,968 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7f9b49a6c5dc4658ad83eb1b17faa2c4 2024-12-12T22:36:07,972 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/d68f187126cb4eac9380c1c4fda0750c 2024-12-12T22:36:07,972 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/b9afb4a80f904a64827150785e0b3119 2024-12-12T22:36:07,973 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/7014cc67b0784f7fa8213f4c698d98d0 2024-12-12T22:36:07,976 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/3ae6763164824df394c4123aaa943a0f 2024-12-12T22:36:07,976 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95b3dc8d042e4e97b8606e7c6a8d0af3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95b3dc8d042e4e97b8606e7c6a8d0af3 2024-12-12T22:36:07,976 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/6b31d6caecdf438a95b13b42af34c294 2024-12-12T22:36:07,977 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/dfbb3eaffa554c3bb80a7b0cb6f7b00b 2024-12-12T22:36:07,979 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/75192e4c2b7941af92eb09ef685595c9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/75192e4c2b7941af92eb09ef685595c9 2024-12-12T22:36:07,981 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891 2024-12-12T22:36:08,013 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/recovered.edits/466.seqid, newMaxSeqId=466, maxSeqId=1 2024-12-12T22:36:08,013 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6. 2024-12-12T22:36:08,014 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] regionserver.HRegion(1635): Region close journal for 3e6ba90564d3642fab3e7bc05bfeebf6: 2024-12-12T22:36:08,015 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=76}] handler.UnassignRegionHandler(170): Closed 3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:08,015 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=75 updating hbase:meta row=3e6ba90564d3642fab3e7bc05bfeebf6, regionState=CLOSED 2024-12-12T22:36:08,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-12T22:36:08,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; CloseRegionProcedure 3e6ba90564d3642fab3e7bc05bfeebf6, server=1aef280cf0a8,36025,1734042873576 in 1.3630 sec 2024-12-12T22:36:08,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-12T22:36:08,023 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3e6ba90564d3642fab3e7bc05bfeebf6, UNASSIGN in 1.3670 sec 2024-12-12T22:36:08,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-12T22:36:08,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.3780 sec 2024-12-12T22:36:08,027 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042968027"}]},"ts":"1734042968027"} 2024-12-12T22:36:08,028 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:36:08,083 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T22:36:08,088 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.9740 sec 2024-12-12T22:36:09,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-12T22:36:09,236 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 73 completed 2024-12-12T22:36:09,247 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:36:09,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,259 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=77, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,261 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=77, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,271 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:09,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T22:36:09,285 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/recovered.edits] 2024-12-12T22:36:09,303 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2ae702588bdb45ef924554ee680c3e87 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/2ae702588bdb45ef924554ee680c3e87 2024-12-12T22:36:09,303 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/411f643023504c5c80a2f10e3de849c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/411f643023504c5c80a2f10e3de849c0 2024-12-12T22:36:09,303 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/629b52abe8c1477cb63a36746d8c511b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/629b52abe8c1477cb63a36746d8c511b 2024-12-12T22:36:09,306 DEBUG 
[HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8eee41f3261d40b8bad5a58a428309a5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/A/8eee41f3261d40b8bad5a58a428309a5 2024-12-12T22:36:09,314 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/3d70f1d9f294447e803c40e6a63a8f2a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/3d70f1d9f294447e803c40e6a63a8f2a 2024-12-12T22:36:09,315 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/45e7c04b4a4f45889d1694b0aefc8de3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/45e7c04b4a4f45889d1694b0aefc8de3 2024-12-12T22:36:09,315 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/840d0ff146234158ab618d43e72843a3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/840d0ff146234158ab618d43e72843a3 2024-12-12T22:36:09,315 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/74faaabd5f034a55a4240713c76e5550 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/B/74faaabd5f034a55a4240713c76e5550 2024-12-12T22:36:09,335 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/070464fadf464642a58ef55734223bf9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/070464fadf464642a58ef55734223bf9 2024-12-12T22:36:09,335 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95c5bf515f1a40bc8a282d3c1d07987f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/95c5bf515f1a40bc8a282d3c1d07987f 2024-12-12T22:36:09,341 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/ceff644106f6477daeec607295300557 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/ceff644106f6477daeec607295300557 2024-12-12T22:36:09,343 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/eec5daa95c2b4e8782d1247dff534beb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/eec5daa95c2b4e8782d1247dff534beb 2024-12-12T22:36:09,355 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/recovered.edits/466.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/recovered.edits/466.seqid 2024-12-12T22:36:09,356 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6 2024-12-12T22:36:09,356 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:36:09,358 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=77, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,367 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:36:09,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T22:36:09,377 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T22:36:09,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=77, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,385 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T22:36:09,386 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734042969385"}]},"ts":"9223372036854775807"} 2024-12-12T22:36:09,394 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:36:09,394 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3e6ba90564d3642fab3e7bc05bfeebf6, NAME => 'TestAcidGuarantees,,1734042940440.3e6ba90564d3642fab3e7bc05bfeebf6.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:36:09,394 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
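The HFileArchiver entries above all follow one observable path convention: a store file under <rootdir>/data/default/<table>/<region>/<family>/<hfile> is moved to the same region-relative location re-rooted under <rootdir>/archive. A minimal illustration of that mapping follows; it is a plain string helper written for this note, not part of the HBase API, and the sample path is taken from the entries above.

// Illustrative only: mirrors the data -> archive path mapping seen in the HFileArchiver log lines.
public final class ArchivePathExample {
  static String toArchivePath(String rootDir, String dataPath) {
    // <root>/data/default/<table>/<region>/<cf>/<file> -> <root>/archive/data/default/<table>/<region>/<cf>/<file>
    String prefix = rootDir + "/data/";
    if (!dataPath.startsWith(prefix)) {
      throw new IllegalArgumentException("not under " + prefix);
    }
    return rootDir + "/archive/data/" + dataPath.substring(prefix.length());
  }

  public static void main(String[] args) {
    String root = "hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc";
    String src = root + "/data/default/TestAcidGuarantees/3e6ba90564d3642fab3e7bc05bfeebf6/C/8be24623b1484ee38ce0a7934f5cb891";
    // Prints the archive location recorded for this file in the log above.
    System.out.println(toArchivePath(root, src));
  }
}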
2024-12-12T22:36:09,396 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734042969395"}]},"ts":"9223372036854775807"} 2024-12-12T22:36:09,423 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:36:09,448 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=77, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 201 msec 2024-12-12T22:36:09,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-12T22:36:09,585 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-12T22:36:09,602 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=243 (was 243), OpenFileDescriptor=449 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1480 (was 1536), ProcessCount=11 (was 11), AvailableMemoryMB=4069 (was 4481) 2024-12-12T22:36:09,621 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=243, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=1480, ProcessCount=11, AvailableMemoryMB=4068 2024-12-12T22:36:09,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-12T22:36:09,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:36:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=78, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:09,639 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:36:09,639 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:09,640 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 78 2024-12-12T22:36:09,643 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:36:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-12-12T22:36:09,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742137_1313 (size=963) 2024-12-12T22:36:09,697 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:36:09,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-12-12T22:36:09,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742138_1314 (size=53) 2024-12-12T22:36:09,759 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:36:09,759 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 060ff996d98de0b1a764cdfe36e5b58b, disabling compactions & flushes 2024-12-12T22:36:09,759 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:09,759 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:09,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. after waiting 0 ms 2024-12-12T22:36:09,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:09,760 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:09,760 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:09,763 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:36:09,764 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734042969763"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042969763"}]},"ts":"1734042969763"} 2024-12-12T22:36:09,769 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
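The create request logged above asks for three column families (A, B, C, one version each) and sets the table attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why each store later reports memstore type=CompactingMemStore with compactor=ADAPTIVE. A minimal sketch of how such a table might be created with the HBase 2.x client API follows; it is not the test's own code, and the Configuration, class name, and 131072-byte flush size (the value behind the TableDescriptorChecker warning above) are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAdaptiveTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table attribute shown in the TABLE_ATTRIBUTES => METADATA block of the create request.
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
          // Deliberately small flush size; a value this low is what the checker warns about.
          .setMemStoreFlushSize(131072L);
      for (String family : new String[] {"A", "B", "C"}) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)  // VERSIONS => '1'
            .build());
      }
      admin.createTable(tdb.build());  // stored as a CreateTableProcedure on the master
    }
  }
}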
2024-12-12T22:36:09,772 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:36:09,772 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042969772"}]},"ts":"1734042969772"} 2024-12-12T22:36:09,775 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:36:09,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, ASSIGN}] 2024-12-12T22:36:09,856 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, ASSIGN 2024-12-12T22:36:09,857 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=79, ppid=78, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:36:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-12-12T22:36:10,010 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=79 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:10,015 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; OpenRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:10,170 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:10,191 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:10,192 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7285): Opening region: {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:36:10,192 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,192 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:36:10,192 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7327): checking encryption for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,192 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7330): checking classloading for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,201 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,216 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:10,217 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName A 2024-12-12T22:36:10,217 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:10,221 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:10,221 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,231 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:10,231 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName B 2024-12-12T22:36:10,231 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:10,236 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:10,236 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,245 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:10,246 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName C 2024-12-12T22:36:10,246 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:10,248 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:10,249 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:10,252 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-12-12T22:36:10,256 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,262 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:36:10,264 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1085): writing seq id for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:10,298 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:36:10,299 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1102): Opened 060ff996d98de0b1a764cdfe36e5b58b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70600418, jitterRate=0.05202820897102356}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:36:10,300 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1001): Region open journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:10,315 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., pid=80, masterSystemTime=1734042970170 2024-12-12T22:36:10,326 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=79 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:10,327 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:10,327 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:10,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-12T22:36:10,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; OpenRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 in 316 msec 2024-12-12T22:36:10,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=78 2024-12-12T22:36:10,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=78, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, ASSIGN in 480 msec 2024-12-12T22:36:10,344 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:36:10,345 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042970345"}]},"ts":"1734042970345"} 2024-12-12T22:36:10,350 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:36:10,372 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=78, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:36:10,383 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 744 msec 2024-12-12T22:36:10,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=78 2024-12-12T22:36:10,754 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 78 completed 2024-12-12T22:36:10,756 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-12-12T22:36:10,808 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:10,812 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:10,814 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:10,815 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:36:10,816 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58764, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:36:10,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T22:36:10,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:36:10,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:10,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742139_1315 (size=999) 2024-12-12T22:36:10,860 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T22:36:10,860 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T22:36:10,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:36:10,868 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, REOPEN/MOVE}] 2024-12-12T22:36:10,872 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, REOPEN/MOVE 2024-12-12T22:36:10,873 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:10,874 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:36:10,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; CloseRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:11,026 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,027 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(124): Close 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,027 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:36:11,027 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1681): Closing 060ff996d98de0b1a764cdfe36e5b58b, disabling compactions & flushes 2024-12-12T22:36:11,027 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:11,027 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:11,027 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. after waiting 0 ms 2024-12-12T22:36:11,027 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
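The modify request logged above changes family 'A' to IS_MOB => 'true' with MOB_THRESHOLD => '4', and the master then runs ModifyTableProcedure/ReopenTableRegionsProcedure (pids 81-85) to reopen the region with the new descriptor. A hedged sketch of one way to issue an equivalent change from a client follows; the test itself submits a full modify-table request, whereas this example uses the per-family call, and the class name and connection setup are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public final class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // Start from the current descriptor for family A and flip it to MOB.
      ColumnFamilyDescriptor a = admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4': values larger than 4 bytes go to MOB files
          .build();
      // Drives a modify procedure on the master; the table's regions are reopened to apply it.
      admin.modifyColumnFamily(tn, mobA);
    }
  }
}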
2024-12-12T22:36:11,057 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T22:36:11,060 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:11,060 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegion(1635): Region close journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:11,060 WARN [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] regionserver.HRegionServer(3786): Not adding moved region record: 060ff996d98de0b1a764cdfe36e5b58b to self. 2024-12-12T22:36:11,069 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=84}] handler.UnassignRegionHandler(170): Closed 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,072 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=CLOSED 2024-12-12T22:36:11,077 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-12T22:36:11,077 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; CloseRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 in 200 msec 2024-12-12T22:36:11,078 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=82, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, REOPEN/MOVE; state=CLOSED, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=true 2024-12-12T22:36:11,231 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,235 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=83, state=RUNNABLE; OpenRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:11,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,390 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:11,390 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:36:11,391 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,391 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:36:11,391 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,391 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,392 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,393 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:11,393 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName A 2024-12-12T22:36:11,394 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:11,395 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:11,395 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,396 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:11,396 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName B 2024-12-12T22:36:11,396 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:11,397 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:11,397 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,398 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:11,398 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 060ff996d98de0b1a764cdfe36e5b58b columnFamilyName C 2024-12-12T22:36:11,398 DEBUG [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:11,398 INFO [StoreOpener-060ff996d98de0b1a764cdfe36e5b58b-1 {}] regionserver.HStore(327): Store=060ff996d98de0b1a764cdfe36e5b58b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:11,399 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:11,399 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,400 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,401 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:36:11,402 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,403 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 060ff996d98de0b1a764cdfe36e5b58b; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71520112, jitterRate=0.06573271751403809}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:36:11,404 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:11,410 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., pid=85, masterSystemTime=1734042971387 2024-12-12T22:36:11,412 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:11,412 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:11,412 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=OPEN, openSeqNum=5, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=83 2024-12-12T22:36:11,414 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=83, state=SUCCESS; OpenRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 in 178 msec 2024-12-12T22:36:11,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=82 2024-12-12T22:36:11,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=82, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, REOPEN/MOVE in 546 msec 2024-12-12T22:36:11,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-12T22:36:11,416 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 551 msec 2024-12-12T22:36:11,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 600 msec 2024-12-12T22:36:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-12T22:36:11,419 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-12-12T22:36:11,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,449 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-12-12T22:36:11,463 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,464 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-12-12T22:36:11,477 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,478 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 
127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-12-12T22:36:11,507 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,507 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-12-12T22:36:11,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,578 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-12-12T22:36:11,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,621 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-12-12T22:36:11,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,676 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-12-12T22:36:11,714 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,715 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x627cad17 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37a637ac 2024-12-12T22:36:11,758 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cb9e50e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,759 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x39387e4d to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fa53591 2024-12-12T22:36:11,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb726fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:11,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:11,835 DEBUG [hconnection-0x702dd5f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,836 DEBUG [hconnection-0x209851c9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,837 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41032, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,842 DEBUG [hconnection-0x3bb60cc6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,843 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41030, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,844 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=86, table=TestAcidGuarantees 2024-12-12T22:36:11,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:11,851 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=86, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=86, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:11,853 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=86, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=86, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:11,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:11,853 DEBUG [hconnection-0x62777419-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,855 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,875 DEBUG [hconnection-0x256e2eb8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,883 DEBUG [hconnection-0x3c69acdc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,883 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,883 DEBUG [hconnection-0x1a92c5d3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,887 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,887 DEBUG [hconnection-0x58f5c2de-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,888 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,891 DEBUG [hconnection-0x77d4215c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,894 DEBUG [hconnection-0x3b21a8c1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:11,896 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41102, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,896 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41094, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,899 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41104, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:11,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:11,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:11,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:11,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 
2024-12-12T22:36:11,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:11,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043031981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:11,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043031982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:11,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043031983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:11,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043031984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043031987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:11,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ec4553355b784172a7a4173f0c302daa_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042971906/Put/seqid=0 2024-12-12T22:36:12,009 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742140_1316 (size=12154) 2024-12-12T22:36:12,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043032085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043032089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043032089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043032103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043032104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:12,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:12,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043032287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043032296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043032305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043032311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043032319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,335 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:12,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,424 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:12,430 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212ec4553355b784172a7a4173f0c302daa_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ec4553355b784172a7a4173f0c302daa_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:12,432 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c881940a475f42ecb12b78a0ebdc645f, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:12,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c881940a475f42ecb12b78a0ebdc645f is 175, key is test_row_0/A:col10/1734042971906/Put/seqid=0 2024-12-12T22:36:12,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:12,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742141_1317 (size=30955) 2024-12-12T22:36:12,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:12,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,594 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043032592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043032616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043032633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043032637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:12,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043032651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,660 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,669 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,824 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:12,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,825 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,898 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c881940a475f42ecb12b78a0ebdc645f 2024-12-12T22:36:12,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/41370f4404be4004a4254d146038b167 is 50, key is test_row_0/B:col10/1734042971906/Put/seqid=0 2024-12-12T22:36:12,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:12,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:12,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:12,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
as already flushing 2024-12-12T22:36:12,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:12,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:12,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742142_1318 (size=12001) 2024-12-12T22:36:13,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:13,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043033100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043033128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,140 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:13,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:13,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:13,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:13,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:13,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:13,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043033142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:13,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043033153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:13,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043033167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:13,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/41370f4404be4004a4254d146038b167 2024-12-12T22:36:13,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/cdd37298653e474f96e552accf147a7f is 50, key is test_row_0/C:col10/1734042971906/Put/seqid=0 2024-12-12T22:36:13,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742143_1319 (size=12001) 2024-12-12T22:36:13,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/cdd37298653e474f96e552accf147a7f 2024-12-12T22:36:13,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c881940a475f42ecb12b78a0ebdc645f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f 2024-12-12T22:36:13,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:13,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:13,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:13,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:13,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] handler.RSProcedureHandler(58): pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=87 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=87 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:13,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f, entries=150, sequenceid=17, filesize=30.2 K 2024-12-12T22:36:13,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/41370f4404be4004a4254d146038b167 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167 2024-12-12T22:36:13,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167, entries=150, sequenceid=17, filesize=11.7 K 2024-12-12T22:36:13,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/cdd37298653e474f96e552accf147a7f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f 2024-12-12T22:36:13,519 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f, entries=150, sequenceid=17, filesize=11.7 K 
2024-12-12T22:36:13,520 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 060ff996d98de0b1a764cdfe36e5b58b in 1605ms, sequenceid=17, compaction requested=false 2024-12-12T22:36:13,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:13,591 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:36:13,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:13,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=87 2024-12-12T22:36:13,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:13,644 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:13,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:13,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121251741dbda29c489abbe3054074906d2e_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042971970/Put/seqid=0 2024-12-12T22:36:13,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742144_1320 (size=12154) 2024-12-12T22:36:13,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:14,105 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:14,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:14,123 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121251741dbda29c489abbe3054074906d2e_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121251741dbda29c489abbe3054074906d2e_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:14,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/f1ff543fba404df7841796b48c119766, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:14,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/f1ff543fba404df7841796b48c119766 is 175, key is test_row_0/A:col10/1734042971970/Put/seqid=0 2024-12-12T22:36:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742145_1321 (size=30955) 2024-12-12T22:36:14,156 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/f1ff543fba404df7841796b48c119766 2024-12-12T22:36:14,169 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043034161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/b14dbabc344c4046832a4698af11b2a2 is 50, key is test_row_0/B:col10/1734042971970/Put/seqid=0 2024-12-12T22:36:14,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043034161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043034169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043034179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043034182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742146_1322 (size=12001) 2024-12-12T22:36:14,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043034282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,283 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043034282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043034485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043034488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,617 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/b14dbabc344c4046832a4698af11b2a2 2024-12-12T22:36:14,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/26b311ad813c4c2f9e11348c0518f673 is 50, key is test_row_0/C:col10/1734042971970/Put/seqid=0 2024-12-12T22:36:14,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742147_1323 (size=12001) 2024-12-12T22:36:14,700 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/26b311ad813c4c2f9e11348c0518f673 2024-12-12T22:36:14,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/f1ff543fba404df7841796b48c119766 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766 2024-12-12T22:36:14,772 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766, entries=150, sequenceid=40, filesize=30.2 K 2024-12-12T22:36:14,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/b14dbabc344c4046832a4698af11b2a2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2 2024-12-12T22:36:14,795 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T22:36:14,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/26b311ad813c4c2f9e11348c0518f673 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673 2024-12-12T22:36:14,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043034793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:14,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043034799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:14,810 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T22:36:14,813 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 060ff996d98de0b1a764cdfe36e5b58b in 1169ms, sequenceid=40, compaction requested=false 2024-12-12T22:36:14,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:14,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:14,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-12T22:36:14,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-12T22:36:14,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-12T22:36:14,818 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9630 sec 2024-12-12T22:36:14,820 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=86, table=TestAcidGuarantees in 2.9880 sec 2024-12-12T22:36:15,311 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:15,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:15,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:15,328 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121289d93e8198454ad894268398f656f47d_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:15,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742148_1324 (size=14594) 2024-12-12T22:36:15,364 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:15,384 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121289d93e8198454ad894268398f656f47d_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289d93e8198454ad894268398f656f47d_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:15,395 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/3c125955fd4c4fd290b541a53e7579c2, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:15,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/3c125955fd4c4fd290b541a53e7579c2 is 175, key is test_row_0/A:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:15,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742149_1325 (size=39549) 2024-12-12T22:36:15,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043035444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043035443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043035555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043035563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043035765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:15,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043035773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:15,848 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/3c125955fd4c4fd290b541a53e7579c2 2024-12-12T22:36:15,869 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/c25bf4f4e34f4ffc97ef6ca3327b893e is 50, key is test_row_0/B:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:15,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742150_1326 (size=12001) 2024-12-12T22:36:15,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-12T22:36:15,974 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 86 completed 2024-12-12T22:36:15,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:15,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=88, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=88, table=TestAcidGuarantees 2024-12-12T22:36:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 
2024-12-12T22:36:15,988 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=88, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=88, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:15,991 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=88, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=88, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:15,991 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:16,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043036079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043036083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:16,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:16,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043036187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043036190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,192 DEBUG [Thread-1447 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4213 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:16,193 DEBUG [Thread-1443 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:16,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043036199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,203 DEBUG [Thread-1445 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4221 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:16,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:16,308 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 
{event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:16,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:16,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:16,315 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/c25bf4f4e34f4ffc97ef6ca3327b893e 2024-12-12T22:36:16,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/61732b4e1c9c463b8b9136583b4a7b02 is 50, key is test_row_0/C:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:16,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742151_1327 (size=12001) 2024-12-12T22:36:16,415 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/61732b4e1c9c463b8b9136583b4a7b02 2024-12-12T22:36:16,449 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/3c125955fd4c4fd290b541a53e7579c2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2 2024-12-12T22:36:16,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:16,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,471 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2, entries=200, sequenceid=54, filesize=38.6 K 2024-12-12T22:36:16,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/c25bf4f4e34f4ffc97ef6ca3327b893e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e 2024-12-12T22:36:16,543 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e, entries=150, sequenceid=54, filesize=11.7 K 2024-12-12T22:36:16,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/61732b4e1c9c463b8b9136583b4a7b02 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02 2024-12-12T22:36:16,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02, entries=150, sequenceid=54, filesize=11.7 K 
2024-12-12T22:36:16,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 060ff996d98de0b1a764cdfe36e5b58b in 1280ms, sequenceid=54, compaction requested=true 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:16,591 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:36:16,592 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:16,592 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:16,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:16,604 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:16,604 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:16,604 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:16,604 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=99.1 K 2024-12-12T22:36:16,604 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,604 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2] 2024-12-12T22:36:16,605 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:16,605 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files) 2024-12-12T22:36:16,605 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:16,605 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=35.2 K 2024-12-12T22:36:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:16,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:16,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:16,614 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting cdd37298653e474f96e552accf147a7f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734042971875 2024-12-12T22:36:16,614 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c881940a475f42ecb12b78a0ebdc645f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734042971875 2024-12-12T22:36:16,615 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 26b311ad813c4c2f9e11348c0518f673, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734042971970 2024-12-12T22:36:16,615 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f1ff543fba404df7841796b48c119766, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734042971970 2024-12-12T22:36:16,616 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 61732b4e1c9c463b8b9136583b4a7b02, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:16,616 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
3c125955fd4c4fd290b541a53e7579c2, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:16,631 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,632 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:16,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123a6c69ba1e6147feb9077d821ad16c18_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042975439/Put/seqid=0 2024-12-12T22:36:16,669 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:16,669 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/99ed3cb854054e39b2d8744258c577c2 is 50, key is test_row_0/C:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:16,685 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:16,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742152_1328 (size=14594) 2024-12-12T22:36:16,697 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:16,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043036698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043036701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,717 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123a6c69ba1e6147feb9077d821ad16c18_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123a6c69ba1e6147feb9077d821ad16c18_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:16,720 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121273629ba210b94a928f94b2fb5ee512b1_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:16,722 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121273629ba210b94a928f94b2fb5ee512b1_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:16,722 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121273629ba210b94a928f94b2fb5ee512b1_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:16,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742153_1329 (size=12104) 2024-12-12T22:36:16,729 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/6b740f5c3ae7406781bf06468cc31132, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:16,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/6b740f5c3ae7406781bf06468cc31132 is 175, key is test_row_0/A:col10/1734042975439/Put/seqid=0 2024-12-12T22:36:16,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742154_1330 (size=4469) 2024-12-12T22:36:16,776 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#278 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:16,777 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/111c38ff52114e71835751ac7803948b is 175, key is test_row_0/A:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:16,783 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/99ed3cb854054e39b2d8744258c577c2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/99ed3cb854054e39b2d8744258c577c2 2024-12-12T22:36:16,795 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,796 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into 99ed3cb854054e39b2d8744258c577c2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:16,796 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:16,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
as already flushing 2024-12-12T22:36:16,796 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=13, startTime=1734042976591; duration=0sec 2024-12-12T22:36:16,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,796 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:16,797 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C 2024-12-12T22:36:16,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,797 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:16,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742155_1331 (size=39549) 2024-12-12T22:36:16,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,807 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:16,808 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:16,808 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:16,808 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=35.2 K 2024-12-12T22:36:16,809 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 41370f4404be4004a4254d146038b167, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734042971875 2024-12-12T22:36:16,820 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b14dbabc344c4046832a4698af11b2a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734042971970 2024-12-12T22:36:16,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043036810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,831 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c25bf4f4e34f4ffc97ef6ca3327b893e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:16,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:16,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043036831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742156_1332 (size=31058) 2024-12-12T22:36:16,880 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:16,881 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/2acda721638c4ea99b415fa75c59360c is 50, key is test_row_0/B:col10/1734042974154/Put/seqid=0 2024-12-12T22:36:16,897 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/111c38ff52114e71835751ac7803948b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b 2024-12-12T22:36:16,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742157_1333 (size=12104) 2024-12-12T22:36:16,951 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into 111c38ff52114e71835751ac7803948b(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:16,951 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:16,951 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=13, startTime=1734042976591; duration=0sec 2024-12-12T22:36:16,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:16,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A 2024-12-12T22:36:16,956 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:16,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:16,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:16,998 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/2acda721638c4ea99b415fa75c59360c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/2acda721638c4ea99b415fa75c59360c 2024-12-12T22:36:17,039 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into 2acda721638c4ea99b415fa75c59360c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:17,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:17,039 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=13, startTime=1734042976591; duration=0sec 2024-12-12T22:36:17,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:17,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B 2024-12-12T22:36:17,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043037042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043037046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:17,114 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,199 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/6b740f5c3ae7406781bf06468cc31132 2024-12-12T22:36:17,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/972bbb732137449897e3f70c6a0b07fb is 50, key is test_row_0/B:col10/1734042975439/Put/seqid=0 2024-12-12T22:36:17,279 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,280 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742158_1334 (size=12001) 2024-12-12T22:36:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043037353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043037356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,445 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,446 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/972bbb732137449897e3f70c6a0b07fb 2024-12-12T22:36:17,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/7e19b0fdad1247d593a9866c75a406a8 is 50, key is test_row_0/C:col10/1734042975439/Put/seqid=0 2024-12-12T22:36:17,763 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742159_1335 (size=12001) 2024-12-12T22:36:17,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043037859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:17,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043037862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,927 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:17,928 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:17,928 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:17,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:17,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:18,082 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:18,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:18,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:18,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:18,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:18,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:18,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/7e19b0fdad1247d593a9866c75a406a8 2024-12-12T22:36:18,258 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:18,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/6b740f5c3ae7406781bf06468cc31132 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132 2024-12-12T22:36:18,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:18,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:18,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:18,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:18,259 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] handler.RSProcedureHandler(58): pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:18,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=89 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:18,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=89 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:18,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132, entries=200, sequenceid=78, filesize=38.6 K 2024-12-12T22:36:18,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/972bbb732137449897e3f70c6a0b07fb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb 2024-12-12T22:36:18,380 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T22:36:18,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/7e19b0fdad1247d593a9866c75a406a8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8 2024-12-12T22:36:18,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8, entries=150, sequenceid=78, filesize=11.7 K 2024-12-12T22:36:18,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 060ff996d98de0b1a764cdfe36e5b58b in 1790ms, sequenceid=78, compaction requested=false 2024-12-12T22:36:18,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:18,413 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:18,413 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=89 2024-12-12T22:36:18,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:18,414 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:18,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:18,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212dcff3e2ad413458497d0f48d153bed5f_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042976680/Put/seqid=0 2024-12-12T22:36:18,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742160_1336 (size=12154) 2024-12-12T22:36:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:18,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
as already flushing 2024-12-12T22:36:18,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:18,920 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212dcff3e2ad413458497d0f48d153bed5f_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212dcff3e2ad413458497d0f48d153bed5f_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:18,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c129e2704b8442d893abb8664360d687, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:18,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c129e2704b8442d893abb8664360d687 is 175, key is test_row_0/A:col10/1734042976680/Put/seqid=0 2024-12-12T22:36:18,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742161_1337 (size=30955) 2024-12-12T22:36:19,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043039039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043039040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043039150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,158 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043039156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043039364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043039371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,379 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c129e2704b8442d893abb8664360d687 2024-12-12T22:36:19,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/355faa90cac445c0a7dd71d5f193dd65 is 50, key is test_row_0/B:col10/1734042976680/Put/seqid=0 2024-12-12T22:36:19,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742162_1338 (size=12001) 2024-12-12T22:36:19,494 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/355faa90cac445c0a7dd71d5f193dd65 2024-12-12T22:36:19,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/65c4d7a5b3154e65bb455c9ceea0cda2 is 50, key is test_row_0/C:col10/1734042976680/Put/seqid=0 2024-12-12T22:36:19,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742163_1339 (size=12001) 2024-12-12T22:36:19,572 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/65c4d7a5b3154e65bb455c9ceea0cda2 2024-12-12T22:36:19,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/c129e2704b8442d893abb8664360d687 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687 2024-12-12T22:36:19,615 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687, entries=150, sequenceid=93, filesize=30.2 K 2024-12-12T22:36:19,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/355faa90cac445c0a7dd71d5f193dd65 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65 2024-12-12T22:36:19,622 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T22:36:19,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/65c4d7a5b3154e65bb455c9ceea0cda2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2 2024-12-12T22:36:19,639 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2, entries=150, sequenceid=93, filesize=11.7 K 2024-12-12T22:36:19,647 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 060ff996d98de0b1a764cdfe36e5b58b in 1233ms, sequenceid=93, compaction requested=true 2024-12-12T22:36:19,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:19,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:19,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=89}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=89 2024-12-12T22:36:19,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=89 2024-12-12T22:36:19,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-12-12T22:36:19,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6790 sec 2024-12-12T22:36:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:19,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:19,703 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=88, table=TestAcidGuarantees in 3.7100 sec 2024-12-12T22:36:19,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125e101486f7f34df5976cf1b509870879_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:19,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043039746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043039755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742164_1340 (size=12154) 2024-12-12T22:36:19,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043039868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:19,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:19,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043039885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043040073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043040092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=88 2024-12-12T22:36:20,118 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 88 completed 2024-12-12T22:36:20,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:20,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=90, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees 2024-12-12T22:36:20,120 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=90, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:20,121 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=90, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:20,121 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:20,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:20,187 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:20,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043040198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,200 DEBUG [Thread-1447 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8221 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:20,201 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125e101486f7f34df5976cf1b509870879_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125e101486f7f34df5976cf1b509870879_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:20,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043040204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043040209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,212 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/573e7fc8afce42fb9afb18a9e4268031, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:20,212 DEBUG [Thread-1443 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8228 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:20,212 DEBUG [Thread-1445 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8230 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:20,212 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/573e7fc8afce42fb9afb18a9e4268031 is 175, key is test_row_0/A:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:20,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:20,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742165_1341 (size=30955) 2024-12-12T22:36:20,273 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,275 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:20,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:20,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
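The RpcRetryingCallerImpl entries above (tries=7, retries=16, started=8228 ms ago, deadline=1734043040204) show the client burning through its retry budget while the region stays over its memstore limit. A minimal sketch of how that budget is typically set on the client side, using standard HBase configuration keys; the retry values, table name, and row are copied from the log purely for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryBudgetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Retry budget; the log above reports "retries=16" for this client.
    conf.setInt("hbase.client.retries.number", 16);
    // Base pause between attempts; the caller backs off from this value.
    conf.setInt("hbase.client.pause", 100);
    // Overall operation timeout, corresponding to the "deadline" field in the log.
    conf.setInt("hbase.client.operation.timeout", 120000);
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retried internally until the budget or the deadline is exhausted
    }
  }
}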
2024-12-12T22:36:20,277 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043040387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043040400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:20,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:20,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
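Every "Region is too busy" WARN above originates in HRegion.checkResources, which rejects the write once the region's memstore is over its blocking limit (512.0 K in this test tool run). A minimal conceptual sketch of that kind of admission check, with hypothetical class and field names; it is not HBase's actual implementation:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/** Conceptual sketch of a memstore admission check; names are illustrative only. */
class MemstorePressureGate {
  private final AtomicLong memstoreSizeBytes = new AtomicLong();
  private final long blockingLimitBytes;

  MemstorePressureGate(long blockingLimitBytes) {
    this.blockingLimitBytes = blockingLimitBytes;
  }

  /** Reject the write while the in-memory store is over its blocking limit. */
  void checkResources(String regionName) throws IOException {
    if (memstoreSizeBytes.get() > blockingLimitBytes) {
      // HBase throws RegionTooBusyException (an IOException) in the analogous situation.
      throw new IOException("Over memstore limit=" + blockingLimitBytes
          + " bytes, regionName=" + regionName);
    }
  }

  /** Called when a write is applied to the memstore. */
  void accountWrite(long bytes) {
    memstoreSizeBytes.addAndGet(bytes);
  }

  /** Called when a flush moves data out of the memstore onto disk. */
  void accountFlush(long bytes) {
    memstoreSizeBytes.addAndGet(-bytes);
  }
}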
2024-12-12T22:36:20,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,609 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:20,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:20,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
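The repeated pid=91 records above are the master re-dispatching the FlushRegionCallable, which keeps failing with "Unable to complete flush ... as already flushing" until the in-progress flush finishes. A minimal sketch of that resend-until-done pattern, with hypothetical names; it stands in for, but is not, the HBase procedure framework:

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

/** Illustrative retry loop for a remote flush request; not HBase's procedure dispatcher. */
class FlushRetryDispatcher {
  /** Re-runs the callable until it succeeds, pausing between failed attempts. */
  static void dispatchUntilDone(Callable<Void> flushCallable, long pauseMillis)
      throws Exception {
    while (true) {
      try {
        flushCallable.call();
        return; // flush completed on the region server
      } catch (IOException busy) {
        // Mirrors "Unable to complete flush ... as already flushing": wait and resend.
        TimeUnit.MILLISECONDS.sleep(pauseMillis);
      }
    }
  }
}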
2024-12-12T22:36:20,652 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/573e7fc8afce42fb9afb18a9e4268031 2024-12-12T22:36:20,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/16e3681103c8492e97fc82321b6ede3a is 50, key is test_row_0/B:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:20,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742166_1342 (size=12001) 2024-12-12T22:36:20,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:20,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,772 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:20,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
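The MemStoreFlusher entries above show the A-family mob data flushed at sequenceid=119 (memsize=51.4 K) while the B-family file is still being written. For comparison, a flush of the same table can also be requested explicitly through the public Admin API; a minimal sketch, assuming a reachable cluster and default configuration:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Asks the region servers to flush the table's memstores to HFiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}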
2024-12-12T22:36:20,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043040914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:20,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043040914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,942 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:20,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:20,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
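The client-side stacks above end in AcidGuaranteesTestTool$AtomicityWriter.doAnAction calling HTable.put, which is where the RegionTooBusyException surfaces once the retry budget runs out. A minimal sketch of an application-level writer that treats that exception as back-pressure; note that, depending on retry settings, the client may instead deliver it wrapped (for example inside a RetriesExhaustedException), so the catch shown here is illustrative rather than exhaustive:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

/** Illustrative writer that treats RegionTooBusyException as back-pressure. */
class BackpressureAwareWriter {
  static void putWithBackpressure(Table table, Put put)
      throws IOException, InterruptedException {
    while (true) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        // The region's memstore is over its blocking limit; pause and let flushes catch up.
        TimeUnit.MILLISECONDS.sleep(500);
      }
    }
  }

  /** Example mutation shaped like the test tool's writes; values are illustrative. */
  static Put examplePut() {
    Put put = new Put(Bytes.toBytes("test_row_2"));
    put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    return put;
  }
}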
2024-12-12T22:36:20,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:20,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:21,099 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:21,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:21,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:21,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:21,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:21,104 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] handler.RSProcedureHandler(58): pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:21,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=91 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:21,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=91 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:21,118 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/16e3681103c8492e97fc82321b6ede3a 2024-12-12T22:36:21,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/b801185a7329429dbc3bdb4354fb16df is 50, key is test_row_0/C:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:21,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742167_1343 (size=12001) 2024-12-12T22:36:21,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/b801185a7329429dbc3bdb4354fb16df 2024-12-12T22:36:21,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/573e7fc8afce42fb9afb18a9e4268031 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031 2024-12-12T22:36:21,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031, entries=150, sequenceid=119, filesize=30.2 K 2024-12-12T22:36:21,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/16e3681103c8492e97fc82321b6ede3a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a 2024-12-12T22:36:21,256 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T22:36:21,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:21,257 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/b801185a7329429dbc3bdb4354fb16df as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df 2024-12-12T22:36:21,273 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df, entries=150, sequenceid=119, filesize=11.7 K 2024-12-12T22:36:21,276 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 060ff996d98de0b1a764cdfe36e5b58b in 1584ms, sequenceid=119, compaction requested=true 2024-12-12T22:36:21,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:21,277 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:21,277 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:21,277 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:21,279 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:21,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=91 2024-12-12T22:36:21,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:21,283 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:36:21,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:21,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:21,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:21,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:21,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:21,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:21,297 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 132517 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:21,297 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:21,297 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:21,297 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=129.4 K 2024-12-12T22:36:21,297 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:21,297 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031] 2024-12-12T22:36:21,303 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 111c38ff52114e71835751ac7803948b, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:21,308 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b740f5c3ae7406781bf06468cc31132, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734042975421 2024-12-12T22:36:21,311 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:21,311 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:21,311 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in 
TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:21,311 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/2acda721638c4ea99b415fa75c59360c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=47.0 K 2024-12-12T22:36:21,314 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c129e2704b8442d893abb8664360d687, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734042976668 2024-12-12T22:36:21,317 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 573e7fc8afce42fb9afb18a9e4268031, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:21,318 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2acda721638c4ea99b415fa75c59360c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:21,321 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 972bbb732137449897e3f70c6a0b07fb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734042975421 2024-12-12T22:36:21,323 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 355faa90cac445c0a7dd71d5f193dd65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734042976668 2024-12-12T22:36:21,329 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 16e3681103c8492e97fc82321b6ede3a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:21,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121220c17a0880fe4cc89afeb3f7ef8b4e95_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042979728/Put/seqid=0 2024-12-12T22:36:21,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742168_1344 (size=12154) 2024-12-12T22:36:21,400 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true 
store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:21,413 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#290 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:21,414 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/47df93af8eca480ca08d38cc6ccb324a is 50, key is test_row_0/B:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:21,416 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212f164d60d8d62450fbd3ccea457911917_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:21,418 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212f164d60d8d62450fbd3ccea457911917_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:21,418 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f164d60d8d62450fbd3ccea457911917_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:21,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742169_1345 (size=12241) 2024-12-12T22:36:21,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742170_1346 (size=4469) 2024-12-12T22:36:21,570 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#289 average throughput is 0.14 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:21,570 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2cab89943ada4c04bb7b455d44a7bf92 is 175, key is test_row_0/A:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:21,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742171_1347 (size=31195) 2024-12-12T22:36:21,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:21,823 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121220c17a0880fe4cc89afeb3f7ef8b4e95_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121220c17a0880fe4cc89afeb3f7ef8b4e95_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/9396072892884435834270deb3aa6223, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:21,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/9396072892884435834270deb3aa6223 is 175, key is test_row_0/A:col10/1734042979728/Put/seqid=0 2024-12-12T22:36:21,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742172_1348 (size=30955) 2024-12-12T22:36:21,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:21,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
as already flushing 2024-12-12T22:36:22,000 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/47df93af8eca480ca08d38cc6ccb324a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/47df93af8eca480ca08d38cc6ccb324a 2024-12-12T22:36:22,007 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into 47df93af8eca480ca08d38cc6ccb324a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:22,007 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:22,008 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=12, startTime=1734042981277; duration=0sec 2024-12-12T22:36:22,008 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:22,008 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B 2024-12-12T22:36:22,008 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:22,010 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:22,010 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files) 2024-12-12T22:36:22,010 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:22,010 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/99ed3cb854054e39b2d8744258c577c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=47.0 K 2024-12-12T22:36:22,011 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 99ed3cb854054e39b2d8744258c577c2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734042974154 2024-12-12T22:36:22,011 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2cab89943ada4c04bb7b455d44a7bf92 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92 2024-12-12T22:36:22,012 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e19b0fdad1247d593a9866c75a406a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734042975421 2024-12-12T22:36:22,014 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 65c4d7a5b3154e65bb455c9ceea0cda2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1734042976668 2024-12-12T22:36:22,016 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b801185a7329429dbc3bdb4354fb16df, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:22,024 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into 2cab89943ada4c04bb7b455d44a7bf92(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:22,024 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:22,024 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=12, startTime=1734042981276; duration=0sec 2024-12-12T22:36:22,024 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:22,024 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A 2024-12-12T22:36:22,032 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:22,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/bd78db39c33d498ba29b5b5681479180 is 50, key is test_row_0/C:col10/1734042979687/Put/seqid=0 2024-12-12T22:36:22,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742173_1349 (size=12241) 2024-12-12T22:36:22,070 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/bd78db39c33d498ba29b5b5681479180 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bd78db39c33d498ba29b5b5681479180 2024-12-12T22:36:22,084 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into bd78db39c33d498ba29b5b5681479180(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:22,084 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:22,084 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=12, startTime=1734042981277; duration=0sec 2024-12-12T22:36:22,084 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:22,085 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C 2024-12-12T22:36:22,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043042112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043042111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043042220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043042220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:22,272 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=129, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/9396072892884435834270deb3aa6223 2024-12-12T22:36:22,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/52b0ac11b77948a7a728722e0026f500 is 50, key is test_row_0/B:col10/1734042979728/Put/seqid=0 2024-12-12T22:36:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742174_1350 (size=12001) 2024-12-12T22:36:22,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043042424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043042428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,735 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/52b0ac11b77948a7a728722e0026f500 2024-12-12T22:36:22,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043042732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,743 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:22,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043042741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:22,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/6df9902d79c84c84943b62c50c97639f is 50, key is test_row_0/C:col10/1734042979728/Put/seqid=0 2024-12-12T22:36:22,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742175_1351 (size=12001) 2024-12-12T22:36:23,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:23,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043043245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:23,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:23,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043043248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:23,271 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/6df9902d79c84c84943b62c50c97639f 2024-12-12T22:36:23,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/9396072892884435834270deb3aa6223 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223 2024-12-12T22:36:23,300 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223, entries=150, sequenceid=129, filesize=30.2 K 2024-12-12T22:36:23,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/52b0ac11b77948a7a728722e0026f500 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500 2024-12-12T22:36:23,307 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500, entries=150, sequenceid=129, filesize=11.7 K 2024-12-12T22:36:23,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/6df9902d79c84c84943b62c50c97639f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f 2024-12-12T22:36:23,319 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f, entries=150, sequenceid=129, filesize=11.7 K 2024-12-12T22:36:23,321 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 060ff996d98de0b1a764cdfe36e5b58b in 2037ms, sequenceid=129, compaction requested=false 2024-12-12T22:36:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:23,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:23,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-12T22:36:23,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-12T22:36:23,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-12-12T22:36:23,341 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2070 sec 2024-12-12T22:36:23,345 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=90, table=TestAcidGuarantees in 3.2230 sec 2024-12-12T22:36:24,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:24,256 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:24,257 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121225c2af21106b4ab4a77e0e42cc2b57d7_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:24,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:24,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043044270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:24,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043044272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=90 2024-12-12T22:36:24,279 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 90 completed 2024-12-12T22:36:24,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:24,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees 2024-12-12T22:36:24,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T22:36:24,293 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=92, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:24,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742176_1352 (size=14794) 2024-12-12T22:36:24,294 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=92, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:24,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:24,295 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,301 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121225c2af21106b4ab4a77e0e42cc2b57d7_060ff996d98de0b1a764cdfe36e5b58b to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121225c2af21106b4ab4a77e0e42cc2b57d7_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:24,302 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/d383e4f8b3cc4d789f35015784b89608, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:24,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/d383e4f8b3cc4d789f35015784b89608 is 175, key is test_row_0/A:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:24,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742177_1353 (size=39749) 2024-12-12T22:36:24,328 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/d383e4f8b3cc4d789f35015784b89608 2024-12-12T22:36:24,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/074467a11cd34b689de507a55733d961 is 50, key is test_row_0/B:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:24,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043044376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:24,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043044378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T22:36:24,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742178_1354 (size=12151) 2024-12-12T22:36:24,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/074467a11cd34b689de507a55733d961 2024-12-12T22:36:24,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/e24fdc073c8f483ab434d7fa7fc04075 is 50, key is test_row_0/C:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:24,451 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,452 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=93 2024-12-12T22:36:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:24,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:24,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=93}] handler.RSProcedureHandler(58): pid=93 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:24,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=93 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:24,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=93 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742179_1355 (size=12151) 2024-12-12T22:36:24,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/e24fdc073c8f483ab434d7fa7fc04075 2024-12-12T22:36:24,503 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/d383e4f8b3cc4d789f35015784b89608 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608 2024-12-12T22:36:24,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608, entries=200, sequenceid=160, filesize=38.8 K 2024-12-12T22:36:24,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/074467a11cd34b689de507a55733d961 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961 2024-12-12T22:36:24,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T22:36:24,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/e24fdc073c8f483ab434d7fa7fc04075 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075 2024-12-12T22:36:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,565 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,572 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,575 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075, entries=150, sequenceid=160, filesize=11.9 K 2024-12-12T22:36:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 060ff996d98de0b1a764cdfe36e5b58b in 329ms, sequenceid=160, compaction requested=true 2024-12-12T22:36:24,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:24,586 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:24,586 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:24,586 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,588 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): 
Exploring compaction algorithm has selected 3 files of size 101899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:24,589 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,589 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:24,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,589 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=99.5 K 2024-12-12T22:36:24,589 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:24,589 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608] 2024-12-12T22:36:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,590 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cab89943ada4c04bb7b455d44a7bf92, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,590 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9396072892884435834270deb3aa6223, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734042979728 2024-12-12T22:36:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,591 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d383e4f8b3cc4d789f35015784b89608, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080 2024-12-12T22:36:24,591 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:24,591 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:24,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,591 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:24,591 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/47df93af8eca480ca08d38cc6ccb324a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=35.5 K 2024-12-12T22:36:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,595 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 47df93af8eca480ca08d38cc6ccb324a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:24,596 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b0ac11b77948a7a728722e0026f500, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734042979728 2024-12-12T22:36:24,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,596 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 
074467a11cd34b689de507a55733d961, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080 2024-12-12T22:36:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T22:36:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,605 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,607 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=93 2024-12-12T22:36:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:24,615 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:24,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:24,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,624 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412126418cfe3d0b94b16867f9eae8403dc2e_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:24,626 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412126418cfe3d0b94b16867f9eae8403dc2e_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:24,626 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126418cfe3d0b94b16867f9eae8403dc2e_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,629 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#298 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:24,630 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/e9e1d60d889a41a98f7aa0875192cd20 is 50, key is test_row_0/B:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,641 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,649 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,658 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742180_1356 (size=4469) 2024-12-12T22:36:24,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121217d5ace6c06f4916b1b5c4752e533e1a_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042984264/Put/seqid=0 2024-12-12T22:36:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742181_1357 (size=12493) 2024-12-12T22:36:24,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:24,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742182_1358 (size=9814) 2024-12-12T22:36:24,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
as already flushing 2024-12-12T22:36:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,856 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,867 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,876 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,882 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,895 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T22:36:24,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:24,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:25,072 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#297 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:25,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/31c4b410a0f64ffa84f87bc0ef189741 is 175, key is test_row_0/A:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:25,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:25,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742183_1359 (size=31447) 2024-12-12T22:36:25,140 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121217d5ace6c06f4916b1b5c4752e533e1a_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121217d5ace6c06f4916b1b5c4752e533e1a_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:25,146 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/e9e1d60d889a41a98f7aa0875192cd20 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e9e1d60d889a41a98f7aa0875192cd20 2024-12-12T22:36:25,148 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/31c4b410a0f64ffa84f87bc0ef189741 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741 2024-12-12T22:36:25,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2404e80cb37a479cb68f40037151cff8, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:25,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2404e80cb37a479cb68f40037151cff8 is 175, key is test_row_0/A:col10/1734042984264/Put/seqid=0 2024-12-12T22:36:25,180 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into e9e1d60d889a41a98f7aa0875192cd20(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:25,180 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:25,180 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=13, startTime=1734042984586; duration=0sec 2024-12-12T22:36:25,180 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:25,180 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B 2024-12-12T22:36:25,180 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:25,191 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into 31c4b410a0f64ffa84f87bc0ef189741(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:25,191 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:25,192 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=13, startTime=1734042984585; duration=0sec 2024-12-12T22:36:25,192 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:25,192 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A 2024-12-12T22:36:25,192 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:25,192 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files) 2024-12-12T22:36:25,192 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,192 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bd78db39c33d498ba29b5b5681479180, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=35.5 K 2024-12-12T22:36:25,197 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bd78db39c33d498ba29b5b5681479180, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1734042979033 2024-12-12T22:36:25,198 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6df9902d79c84c84943b62c50c97639f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734042979728 2024-12-12T22:36:25,197 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043045184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742184_1360 (size=22461) 2024-12-12T22:36:25,201 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e24fdc073c8f483ab434d7fa7fc04075, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080 2024-12-12T22:36:25,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043045197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,203 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=168, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2404e80cb37a479cb68f40037151cff8 2024-12-12T22:36:25,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/f7b4a708f91449d495cf499b498c8d1c is 50, key is test_row_0/B:col10/1734042984264/Put/seqid=0 2024-12-12T22:36:25,233 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#301 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:25,235 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/18361bd0d9664237a10f284e86a0e128 is 50, key is test_row_0/C:col10/1734042984255/Put/seqid=0 2024-12-12T22:36:25,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742185_1361 (size=9757) 2024-12-12T22:36:25,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742186_1362 (size=12493) 2024-12-12T22:36:25,276 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/f7b4a708f91449d495cf499b498c8d1c 2024-12-12T22:36:25,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043045303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043045307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/d0b7f913e38e4e94a3522cafe4aaa40a is 50, key is test_row_0/C:col10/1734042984264/Put/seqid=0 2024-12-12T22:36:25,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742187_1363 (size=9757) 2024-12-12T22:36:25,347 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/d0b7f913e38e4e94a3522cafe4aaa40a 2024-12-12T22:36:25,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2404e80cb37a479cb68f40037151cff8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8 2024-12-12T22:36:25,362 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8, entries=100, sequenceid=168, filesize=21.9 K 2024-12-12T22:36:25,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/f7b4a708f91449d495cf499b498c8d1c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c 2024-12-12T22:36:25,368 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 
{event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c, entries=100, sequenceid=168, filesize=9.5 K 2024-12-12T22:36:25,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/d0b7f913e38e4e94a3522cafe4aaa40a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a 2024-12-12T22:36:25,376 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a, entries=100, sequenceid=168, filesize=9.5 K 2024-12-12T22:36:25,378 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 060ff996d98de0b1a764cdfe36e5b58b in 763ms, sequenceid=168, compaction requested=false 2024-12-12T22:36:25,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:25,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:25,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=93}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=93 2024-12-12T22:36:25,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=93 2024-12-12T22:36:25,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-12T22:36:25,385 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0890 sec 2024-12-12T22:36:25,389 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=92, table=TestAcidGuarantees in 1.0980 sec 2024-12-12T22:36:25,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-12T22:36:25,399 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-12-12T22:36:25,401 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:25,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees 2024-12-12T22:36:25,403 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:25,403 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=94, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:25,403 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:25,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:25,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:25,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:25,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=181.14 KB heapSize=475.36 KB 2024-12-12T22:36:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, 
store=B 2024-12-12T22:36:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:25,523 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:25,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043045532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126b4db2c711d9498a9e7998561d983f34_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:25,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043045538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,555 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:25,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,557 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:25,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742188_1364 (size=12304) 2024-12-12T22:36:25,589 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:25,595 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126b4db2c711d9498a9e7998561d983f34_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b4db2c711d9498a9e7998561d983f34_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:25,596 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/0a0a82ef82674caf9e7af99c006c5a39, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:25,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/0a0a82ef82674caf9e7af99c006c5a39 is 175, key is test_row_0/A:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:25,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742189_1365 (size=31105) 2024-12-12T22:36:25,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043045648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043045655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,702 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/18361bd0d9664237a10f284e86a0e128 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/18361bd0d9664237a10f284e86a0e128 2024-12-12T22:36:25,710 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:25,731 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into 18361bd0d9664237a10f284e86a0e128(size=12.2 K), total size for store is 21.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:25,731 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:25,731 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=13, startTime=1734042984586; duration=0sec 2024-12-12T22:36:25,731 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:25,731 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C 2024-12-12T22:36:25,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043045860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,865 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:25,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:25,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:25,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:25,868 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
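The repeated RegionTooBusyException entries above come from the region's memstore blocking check: once the region's combined memstore size crosses its blocking limit (reported here as 512.0 K), new mutations are rejected until the in-flight flush frees space. That limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The log shows only the resulting limit, not the settings that produced it, so the values in the sketch below are hypothetical, chosen only so that 128 KB * 4 reproduces the 512 KB figure.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public final class SmallMemstoreConf {
  // Hypothetical settings that would yield the 512 K blocking limit seen above.
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 128 KB (the stock default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Reject writes once the memstore exceeds flush.size * multiplier, i.e. 512 KB here.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}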
2024-12-12T22:36:25,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:25,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043045866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:26,022 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
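From the client's point of view the rejected mutations above (callId 149, 150, 152) are a retriable condition: the standard HBase client already retries RegionTooBusyException internally with backoff, so the writes land once the flush completes. The sketch below merely makes that pattern explicit with the public client API; the table name comes from the log, while the retry cap and backoff are illustrative, and conn would be obtained from ConnectionFactory.createConnection(conf).

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BackoffPut {
  // Resend a single Put until the region stops reporting "over memstore limit".
  public static void putWithBackoff(Connection conn, Put put) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                  // may be rejected while the memstore is over its limit
          return;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 10) throw busy;   // illustrative cap on retries
          Thread.sleep(100L * attempt);    // simple linear backoff
        }
      }
    }
  }
}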
2024-12-12T22:36:26,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,025 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=60.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/0a0a82ef82674caf9e7af99c006c5a39 2024-12-12T22:36:26,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/ea5a32d7a8b04f60904f2c75d7160b81 is 50, key is test_row_0/B:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:26,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742190_1366 (size=12151) 2024-12-12T22:36:26,174 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043046170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:26,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
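Meanwhile the flush itself is making progress: the MemStoreFlusher entries above show each column family's data being written to an HFile under the region's .tmp directory first (for example .tmp/A/0a0a82ef82674caf9e7af99c006c5a39). Those files become visible only when they are committed into the family directories, the "Committing ... as ..." step logged by HRegionFileSystem(442) elsewhere in this log, which is essentially a rename inside the region's directory on HDFS. Below is a standalone illustration of that final step using the Hadoop FileSystem API; the class and method names are made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CommitFlushedFile {
  // Stand-in for the "Committing <tmp file> as <store file>" step: move the
  // flushed HFile from the region's .tmp area into the column-family directory.
  public static Path commit(Configuration conf, Path tmpFile, Path familyDir) throws Exception {
    FileSystem fs = tmpFile.getFileSystem(conf);
    Path dst = new Path(familyDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dst)) {
      throw new java.io.IOException("Failed to commit " + tmpFile + " as " + dst);
    }
    return dst;
  }
}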
2024-12-12T22:36:26,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:26,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043046181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,379 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:26,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:26,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,551 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/ea5a32d7a8b04f60904f2c75d7160b81 2024-12-12T22:36:26,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
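The pid=95 entries repeating through this stretch are the master-driven half of the same flush: the master keeps re-dispatching a FlushRegionCallable to the region server, and the server answers with "Unable to complete flush ... as already flushing" until the flush already running finishes, while pid=94 is polled separately with "Checking to see if procedure is done". From the test's side this whole exchange would sit behind a single admin-level flush request; the sketch below shows that trigger, which is an assumption about how the flush was requested rather than something visible in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public final class FlushTable {
  // Requesting a flush while one is already in progress just causes the master
  // to retry the remote procedure, as the repeated pid=95 failures above show.
  public static void flush(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}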
2024-12-12T22:36:26,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/ba01147176a040e7982c6e4d3e25f897 is 50, key is test_row_0/C:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:26,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742191_1367 (size=12151) 2024-12-12T22:36:26,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/ba01147176a040e7982c6e4d3e25f897 2024-12-12T22:36:26,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/0a0a82ef82674caf9e7af99c006c5a39 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39 2024-12-12T22:36:26,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:26,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043046681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39, entries=150, sequenceid=200, filesize=30.4 K 2024-12-12T22:36:26,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:26,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043046697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,705 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/ea5a32d7a8b04f60904f2c75d7160b81 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81 2024-12-12T22:36:26,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] handler.RSProcedureHandler(58): pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=95 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=95 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:26,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81, entries=150, sequenceid=200, filesize=11.9 K 2024-12-12T22:36:26,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/ba01147176a040e7982c6e4d3e25f897 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897 2024-12-12T22:36:26,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897, entries=150, sequenceid=200, filesize=11.9 K 2024-12-12T22:36:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 060ff996d98de0b1a764cdfe36e5b58b in 1229ms, sequenceid=200, compaction requested=true 2024-12-12T22:36:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:26,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:26,752 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-12T22:36:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:26,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:26,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:26,753 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:26,753 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:26,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,755 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85013 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:26,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,755 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:26,755 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:26,755 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=83.0 K 2024-12-12T22:36:26,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,755 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,755 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39] 2024-12-12T22:36:26,756 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31c4b410a0f64ffa84f87bc0ef189741, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080 2024-12-12T22:36:26,756 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:26,756 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:26,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,756 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:26,756 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e9e1d60d889a41a98f7aa0875192cd20, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.6 K 2024-12-12T22:36:26,756 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2404e80cb37a479cb68f40037151cff8, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042984264 2024-12-12T22:36:26,759 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a0a82ef82674caf9e7af99c006c5a39, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195 2024-12-12T22:36:26,759 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e9e1d60d889a41a98f7aa0875192cd20, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080 2024-12-12T22:36:26,759 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f7b4a708f91449d495cf499b498c8d1c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042984264 2024-12-12T22:36:26,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,761 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ea5a32d7a8b04f60904f2c75d7160b81, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195 2024-12-12T22:36:26,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,773 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:26,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,775 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,777 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:26,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,778 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/63822721ad7f4649b42e3e5d9d11dd56 is 50, key is test_row_0/B:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:26,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,780 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212f178fd41a8224212a50d90b734604c0f_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:26,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,782 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212f178fd41a8224212a50d90b734604c0f_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:26,782 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f178fd41a8224212a50d90b734604c0f_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:26,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742193_1369 (size=4469) 2024-12-12T22:36:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:26,848 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#306 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:26,849 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/cd6711078d964016a3d626472b0c1c26 is 175, key is test_row_0/A:col10/1734042985195/Put/seqid=0 2024-12-12T22:36:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742192_1368 (size=12595) 2024-12-12T22:36:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,861 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:26,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=95 2024-12-12T22:36:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,865 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:26,866 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:26,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:26,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:26,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:26,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:26,880 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/63822721ad7f4649b42e3e5d9d11dd56 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/63822721ad7f4649b42e3e5d9d11dd56
2024-12-12T22:36:26,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742194_1370 (size=31549)
2024-12-12T22:36:26,896 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into 63822721ad7f4649b42e3e5d9d11dd56(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T22:36:26,896 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b:
2024-12-12T22:36:26,896 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=13, startTime=1734042986752; duration=0sec
2024-12-12T22:36:26,896 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-12T22:36:26,896 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B
2024-12-12T22:36:26,896 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-12T22:36:26,900 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-12T22:36:26,900 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files)
2024-12-12T22:36:26,900 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.
2024-12-12T22:36:26,900 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/18361bd0d9664237a10f284e86a0e128, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.6 K
2024-12-12T22:36:26,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126cf4f052b43d4e8d96d21661f40311be_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042985529/Put/seqid=0
2024-12-12T22:36:26,904 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 18361bd0d9664237a10f284e86a0e128, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734042982080
2024-12-12T22:36:26,904 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d0b7f913e38e4e94a3522cafe4aaa40a, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1734042984264
2024-12-12T22:36:26,907 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting ba01147176a040e7982c6e4d3e25f897, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195
2024-12-12T22:36:26,907 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/cd6711078d964016a3d626472b0c1c26 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26
2024-12-12T22:36:26,916 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into cd6711078d964016a3d626472b0c1c26(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T22:36:26,916 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b:
2024-12-12T22:36:26,916 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=13, startTime=1734042986752; duration=0sec
2024-12-12T22:36:26,916 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T22:36:26,916 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A
2024-12-12T22:36:26,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742195_1371 (size=9814)
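The per-store compaction results above (store B at 22:36:26,896 and store A at 22:36:26,916) share the same "Completed compaction ... into <file>(size=...)" shape, so a quick summary can be pulled straight out of the log. Again a rough sketch, not part of the test output; the script and log file names are made up.

```python
import re
import sys

# Matches records like:
#   ... regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in
#   060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into
#   63822721ad7f4649b42e3e5d9d11dd56(size=12.3 K), total size for store is 12.3 K. ...
COMPLETED = re.compile(
    r"Completed compaction of (?P<nfiles>\d+) .*?file\(s\) in (?P<region>\w+)/(?P<store>\w+) "
    r".*?into (?P<outfile>\w+)\(size=(?P<size>[\d.]+ [KMG])\)"
)

def summarize(path: str) -> None:
    """Print one line per completed compaction: region, store, input count, output file, output size."""
    with open(path, encoding="utf-8") as fh:
        for record in fh:
            m = COMPLETED.search(record)
            if m:
                print(f"{m['region']}/{m['store']}: {m['nfiles']} file(s) -> {m['outfile']} ({m['size']})")

if __name__ == "__main__":
    summarize(sys.argv[1])  # hypothetical usage: python compaction_summary.py test-acid-guarantees.log
```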
2024-12-12T22:36:26,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-12T22:36:26,961 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#309 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-12T22:36:26,961 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/f6a301c15de84b65a9fe83d8f5f41817 is 50, key is test_row_0/C:col10/1734042985195/Put/seqid=0
2024-12-12T22:36:26,974 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412126cf4f052b43d4e8d96d21661f40311be_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126cf4f052b43d4e8d96d21661f40311be_060ff996d98de0b1a764cdfe36e5b58b
2024-12-12T22:36:26,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/a6476cd8563742bf8e2e38552c03ce0c, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b]
2024-12-12T22:36:26,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/a6476cd8563742bf8e2e38552c03ce0c is 175, key is test_row_0/A:col10/1734042985529/Put/seqid=0
2024-12-12T22:36:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742196_1372 (size=12595) 2024-12-12T22:36:27,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:27,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742197_1373 (size=22461) 2024-12-12T22:36:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG line repeats, unchanged apart from the timestamp, on RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=36025) from 2024-12-12T22:36:27,361 through 2024-12-12T22:36:27,425 ...]
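Note: the repeated DEBUG line above comes from a factory that picks a StoreFileTracker implementation from configuration each time an RPC handler needs one. As a rough, hypothetical illustration of that pattern only (this is not the actual org.apache.hadoop.hbase.regionserver.storefiletracker code; the class, property name and interface below are made up), a configuration-driven, reflection-based factory in Java might look like:

import java.lang.reflect.Constructor;
import java.util.Properties;

// Simplified sketch of a configuration-driven factory that instantiates a
// tracker implementation by class name, mirroring the
// "instantiating StoreFileTracker impl ..." DEBUG line in the log above.
public final class TrackerFactorySketch {

    /** Minimal stand-in for a store file tracker. */
    public interface Tracker {
        String name();
    }

    /** Default implementation, analogous to DefaultStoreFileTracker in the log. */
    public static final class DefaultTracker implements Tracker {
        @Override
        public String name() {
            return "default";
        }
    }

    /** Look up the impl class from configuration and instantiate it reflectively. */
    public static Tracker create(Properties conf) throws Exception {
        String impl = conf.getProperty(
            "store.file.tracker.impl", DefaultTracker.class.getName());
        // The factory logs the chosen implementation before constructing it,
        // which is what produces the repeated DEBUG lines above.
        System.out.println("instantiating tracker impl " + impl);
        Constructor<?> ctor = Class.forName(impl).getDeclaredConstructor();
        return (Tracker) ctor.newInstance();
    }

    public static void main(String[] args) throws Exception {
        Tracker tracker = create(new Properties());
        System.out.println("created: " + tracker.name());
    }
}

With no property set, the sketch falls back to the default implementation, which is consistent with every logged instantiation above naming DefaultStoreFileTracker.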
2024-12-12T22:36:27,426 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/a6476cd8563742bf8e2e38552c03ce0c
2024-12-12T22:36:27,432 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/f6a301c15de84b65a9fe83d8f5f41817 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/f6a301c15de84b65a9fe83d8f5f41817
2024-12-12T22:36:27,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/cf0a76ca4a484812aad00f94963b76c7 is 50, key is test_row_0/B:col10/1734042985529/Put/seqid=0
[... interleaved StoreFileTrackerFactory DEBUG lines (same message as above) from 2024-12-12T22:36:27,425 through 2024-12-12T22:36:27,440 omitted ...]
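Note: the flush and commit lines above show the write-to-temp-then-rename pattern: flushed data first lands under the region's .tmp directory and is then moved into the store directory (".tmp/C/f6a301c... as .../C/f6a301c..."). The following Java sketch illustrates that pattern only on the local filesystem; paths and helper names are invented, and the real HBase code works against HDFS through its own filesystem layer.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Simplified illustration of "flush to region/.tmp/<family>/<file>, then
// commit by moving it into region/<family>/<file>".
public final class FlushCommitSketch {

    /** Write the flushed bytes to region/.tmp/<family>/<file>. */
    static Path flushToTmp(Path region, String family, String file, byte[] data)
            throws IOException {
        Path tmp = region.resolve(".tmp").resolve(family).resolve(file);
        Files.createDirectories(tmp.getParent());
        Files.write(tmp, data);
        return tmp;
    }

    /** Commit: move the tmp file into region/<family>/<file>. */
    static Path commit(Path region, String family, Path tmpFile) throws IOException {
        Path dest = region.resolve(family).resolve(tmpFile.getFileName().toString());
        Files.createDirectories(dest.getParent());
        // The atomic move mirrors the "Committing .tmp/... as ..." step in the log.
        return Files.move(tmpFile, dest, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region");
        Path tmp = flushToTmp(region, "C", "f6a301c15de84b65a9fe83d8f5f41817",
            "flushed cells".getBytes(StandardCharsets.UTF_8));
        Path committed = commit(region, "C", tmp);
        System.out.println("committed store file: " + committed);
    }
}

Until the move succeeds, readers only ever see fully written store files, which is what makes the two-step flush safe even if the process dies mid-write.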
2024-12-12T22:36:27,441 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into f6a301c15de84b65a9fe83d8f5f41817(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-12T22:36:27,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b:
2024-12-12T22:36:27,442 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=13, startTime=1734042986752; duration=0sec
2024-12-12T22:36:27,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-12T22:36:27,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C
[... interleaved StoreFileTrackerFactory DEBUG lines (same message as above) from 2024-12-12T22:36:27,440 through 2024-12-12T22:36:27,445 omitted ...]
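Note: the compaction lines above report three store files in family C being merged into the single committed file f6a301c... (12.3 K). As a toy illustration of the merge idea only (not HBase's actual compaction algorithm; the data and class below are made up), merging several sorted key-to-value files into one while letting newer entries win could look like:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Toy sketch: merge several sorted key->value "files" into one sorted output,
// with later (newer) files overriding older values for the same key.
public final class CompactionSketch {

    /** Merge sorted inputs; later files in the list win on duplicate keys. */
    static TreeMap<String, String> compact(List<TreeMap<String, String>> files) {
        TreeMap<String, String> merged = new TreeMap<>();
        for (TreeMap<String, String> file : files) {
            merged.putAll(file); // newer file overwrites older values
        }
        return merged;
    }

    public static void main(String[] args) {
        List<TreeMap<String, String>> files = new ArrayList<>();
        for (int i = 0; i < 3; i++) {
            TreeMap<String, String> f = new TreeMap<>();
            f.put("test_row_0", "value-from-file-" + i);
            f.put("row-" + i, "only-in-file-" + i);
            files.add(f);
        }
        // Three input files collapse into one merged store file, analogous to
        // "Completed compaction of 3 (all) file(s) ... into f6a301c...".
        System.out.println(compact(files));
    }
}

After the merge, the store's total size equals the size of the single new file, which matches the log's "total size for store is 12.3 K".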
2024-12-12T22:36:27,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
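Because the same instantiation message repeats once per store access across the three RPC handler threads (handler=0, 1 and 2 on port 36025), a span like this is easier to review after collapsing identical messages into per-handler counts. The snippet below is a hedged, illustrative log-reading aid and not part of the test run itself; the file name test-output.log is a placeholder.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.TreeMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Counts the repeated "instantiating StoreFileTracker impl ..." DEBUG entries
// per RPC handler in a saved copy of this log. Purely an offline reading aid.
public class HandlerMessageCounts {
    public static void main(String[] args) throws IOException {
        Pattern p = Pattern.compile(
            "\\[(RpcServer\\.default\\.FPBQ\\.Fifo\\.handler=\\d+,queue=\\d+,port=\\d+) \\{\\}\\] "
            + "storefiletracker\\.StoreFileTrackerFactory\\(\\d+\\): instantiating StoreFileTracker impl (\\S+)");
        Map<String, Integer> counts = new TreeMap<>();
        for (String line : Files.readAllLines(Path.of("test-output.log"))) {
            Matcher m = p.matcher(line);
            // A single physical line may hold several log entries, so scan repeatedly.
            while (m.find()) {
                counts.merge(m.group(1) + " -> " + m.group(2), 1, Integer::sum);
            }
        }
        counts.forEach((k, v) -> System.out.println(v + "\t" + k));
    }
}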
2024-12-12T22:36:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742198_1374 (size=9757) 2024-12-12T22:36:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the DEBUG message "storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously from 2024-12-12T22:36:27,540 through 2024-12-12T22:36:27,675, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 36025; identical content each time ...]
2024-12-12T22:36:27,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:27,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:27,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,836 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,861 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:27,899 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/cf0a76ca4a484812aad00f94963b76c7 2024-12-12T22:36:27,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/37eede9780664b39940d3c6b715ff2f5 is 50, key is test_row_0/C:col10/1734042985529/Put/seqid=0 2024-12-12T22:36:28,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742199_1375 (size=9757) 2024-12-12T22:36:28,022 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/37eede9780664b39940d3c6b715ff2f5 2024-12-12T22:36:28,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/a6476cd8563742bf8e2e38552c03ce0c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c 2024-12-12T22:36:28,082 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c, entries=100, sequenceid=207, filesize=21.9 K 2024-12-12T22:36:28,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/cf0a76ca4a484812aad00f94963b76c7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7 2024-12-12T22:36:28,088 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7, entries=100, sequenceid=207, filesize=9.5 K 2024-12-12T22:36:28,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/37eede9780664b39940d3c6b715ff2f5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5 2024-12-12T22:36:28,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043048093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,111 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5, entries=100, sequenceid=207, filesize=9.5 K 2024-12-12T22:36:28,120 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 060ff996d98de0b1a764cdfe36e5b58b in 1255ms, sequenceid=207, compaction requested=false 2024-12-12T22:36:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:28,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=95}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=95 2024-12-12T22:36:28,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=95 2024-12-12T22:36:28,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-12-12T22:36:28,128 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7210 sec 2024-12-12T22:36:28,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=94, table=TestAcidGuarantees in 2.7280 sec 2024-12-12T22:36:28,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:28,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-12-12T22:36:28,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:28,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:28,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f3f81192ad804df996e96d9e7c3b066a_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:28,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043048180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043048224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742200_1376 (size=14794) 2024-12-12T22:36:28,255 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:28,268 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f3f81192ad804df996e96d9e7c3b066a_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f3f81192ad804df996e96d9e7c3b066a_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:28,269 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/bdac465a499d43eb8ca9113536114ce6, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:28,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/bdac465a499d43eb8ca9113536114ce6 is 175, key is test_row_0/A:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:28,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742201_1377 (size=39749) 2024-12-12T22:36:28,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043048297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043048435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043048523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,712 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=241, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/bdac465a499d43eb8ca9113536114ce6 2024-12-12T22:36:28,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043048738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/1eb7de5ca90242f0a4257fb165789e19 is 50, key is test_row_0/B:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:28,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742202_1378 (size=12151) 2024-12-12T22:36:28,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/1eb7de5ca90242f0a4257fb165789e19 2024-12-12T22:36:28,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:28,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043048833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:28,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 is 50, key is test_row_0/C:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:28,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742203_1379 (size=12151) 2024-12-12T22:36:29,249 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:29,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043049246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:29,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=241 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 2024-12-12T22:36:29,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:29,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043049340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:29,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/bdac465a499d43eb8ca9113536114ce6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6 2024-12-12T22:36:29,388 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6, entries=200, sequenceid=241, filesize=38.8 K 2024-12-12T22:36:29,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/1eb7de5ca90242f0a4257fb165789e19 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19 2024-12-12T22:36:29,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19, entries=150, sequenceid=241, filesize=11.9 K 2024-12-12T22:36:29,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 2024-12-12T22:36:29,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,421 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8, entries=150, sequenceid=241, filesize=11.9 K 2024-12-12T22:36:29,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 060ff996d98de0b1a764cdfe36e5b58b in 1287ms, sequenceid=241, compaction requested=true 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:29,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:29,423 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:29,423 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:29,423 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:29,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:29,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:29,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:29,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/63822721ad7f4649b42e3e5d9d11dd56, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.7 K 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93759 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:29,428 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:29,428 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=91.6 K 2024-12-12T22:36:29,428 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 63822721ad7f4649b42e3e5d9d11dd56, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195 2024-12-12T22:36:29,428 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6] 2024-12-12T22:36:29,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,429 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd6711078d964016a3d626472b0c1c26, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195 2024-12-12T22:36:29,429 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting cf0a76ca4a484812aad00f94963b76c7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042985529 2024-12-12T22:36:29,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,435 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6476cd8563742bf8e2e38552c03ce0c, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042985529 2024-12-12T22:36:29,436 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 1eb7de5ca90242f0a4257fb165789e19, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988040 2024-12-12T22:36:29,436 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdac465a499d43eb8ca9113536114ce6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988020 2024-12-12T22:36:29,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,456 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,461 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#316 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:29,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,461 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/b94c6ae2d62a40bcb48530a8d8cb12c9 is 50, key is test_row_0/B:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,462 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412127004e3ea8fcd4af2b7781d771abe0b4d_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:29,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,464 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412127004e3ea8fcd4af2b7781d771abe0b4d_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:29,464 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127004e3ea8fcd4af2b7781d771abe0b4d_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:29,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,467 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,471 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,475 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,481 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,487 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,493 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,497 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,500 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,505 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,509 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742205_1381 (size=4469) 2024-12-12T22:36:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,516 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#315 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:29,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,517 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/539454bd61fe4138b64c02f4e09892ff is 175, key is test_row_0/A:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:29,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742204_1380 (size=12697) 2024-12-12T22:36:29,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-12-12T22:36:29,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,537 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/b94c6ae2d62a40bcb48530a8d8cb12c9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b94c6ae2d62a40bcb48530a8d8cb12c9 2024-12-12T22:36:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,540 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-12-12T22:36:29,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,544 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:29,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees 2024-12-12T22:36:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,548 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:29,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T22:36:29,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,549 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=96, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:29,549 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:29,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,557 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into b94c6ae2d62a40bcb48530a8d8cb12c9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:29,557 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:29,557 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=13, startTime=1734042989423; duration=0sec 2024-12-12T22:36:29,558 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:29,558 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B 2024-12-12T22:36:29,558 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:29,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:36:29,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,564 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:29,564 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files) 2024-12-12T22:36:29,564 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:29,564 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/f6a301c15de84b65a9fe83d8f5f41817, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.7 K 2024-12-12T22:36:29,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,564 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f6a301c15de84b65a9fe83d8f5f41817, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734042985195 2024-12-12T22:36:29,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,566 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 37eede9780664b39940d3c6b715ff2f5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734042985529 2024-12-12T22:36:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,571 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bb99b6ecfdd646fcacd49b6d4d6d97d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988040 2024-12-12T22:36:29,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43795 is added to blk_1073742206_1382 (size=31651) 2024-12-12T22:36:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,609 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#317 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:29,609 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/32e03dec8e3e4ef2a6c9da78b866742b is 50, key is test_row_0/C:col10/1734042988093/Put/seqid=0 2024-12-12T22:36:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,615 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,620 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/539454bd61fe4138b64c02f4e09892ff as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff 2024-12-12T22:36:29,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,625 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,636 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,644 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,645 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into 539454bd61fe4138b64c02f4e09892ff(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:29,645 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:29,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,645 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=13, startTime=1734042989423; duration=0sec 2024-12-12T22:36:29,645 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:29,646 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A 2024-12-12T22:36:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,647 DEBUG 
2024-12-12T22:36:29,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742207_1383 (size=12697)
2024-12-12T22:36:29,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96
2024-12-12T22:36:29,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576
2024-12-12T22:36:29,704 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=97
2024-12-12T22:36:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.
2024-12-12T22:36:29,704 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB
2024-12-12T22:36:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A
2024-12-12T22:36:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:36:29,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B
2024-12-12T22:36:29,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:36:29,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C
2024-12-12T22:36:29,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-12T22:36:29,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212fd00ce6ecb614c3495f1c59258fdda51_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042988139/Put/seqid=0
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742208_1384 (size=9814) 2024-12-12T22:36:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,839 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212fd00ce6ecb614c3495f1c59258fdda51_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd00ce6ecb614c3495f1c59258fdda51_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:29,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2640eca9309e417d96b23e34bd69d010, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2640eca9309e417d96b23e34bd69d010 is 175, key is test_row_0/A:col10/1734042988139/Put/seqid=0 2024-12-12T22:36:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,848 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T22:36:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43795 is added to blk_1073742209_1385 (size=22461) 2024-12-12T22:36:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:29,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,061 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/32e03dec8e3e4ef2a6c9da78b866742b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32e03dec8e3e4ef2a6c9da78b866742b 2024-12-12T22:36:30,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,070 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into 32e03dec8e3e4ef2a6c9da78b866742b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:30,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,070 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:30,070 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=13, startTime=1734042989423; duration=0sec 2024-12-12T22:36:30,070 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:30,070 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C 2024-12-12T22:36:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T22:36:30,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,314 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=248, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2640eca9309e417d96b23e34bd69d010 2024-12-12T22:36:30,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:30,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:30,328 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/0859ed9219a74ecaa90f82f154cc45fa is 50, key is test_row_0/B:col10/1734042988139/Put/seqid=0 2024-12-12T22:36:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:30,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742210_1386 (size=9757) 2024-12-12T22:36:30,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043050430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043050436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043050436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043050439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043050443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043050543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043050548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043050550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043050551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043050551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T22:36:30,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043050750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043050757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043050760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043050761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:30,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043050761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:30,800 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/0859ed9219a74ecaa90f82f154cc45fa 2024-12-12T22:36:30,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/670b00d7a3bf47f59bd530f6b0f5acd8 is 50, key is test_row_0/C:col10/1734042988139/Put/seqid=0 2024-12-12T22:36:30,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742211_1387 (size=9757) 2024-12-12T22:36:30,928 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/670b00d7a3bf47f59bd530f6b0f5acd8 2024-12-12T22:36:30,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/2640eca9309e417d96b23e34bd69d010 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010 2024-12-12T22:36:30,974 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010, entries=100, sequenceid=248, filesize=21.9 K 2024-12-12T22:36:30,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/0859ed9219a74ecaa90f82f154cc45fa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa 2024-12-12T22:36:30,992 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa, entries=100, sequenceid=248, filesize=9.5 K 2024-12-12T22:36:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/670b00d7a3bf47f59bd530f6b0f5acd8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8 2024-12-12T22:36:31,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043051058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043051060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,063 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8, entries=100, sequenceid=248, filesize=9.5 K 2024-12-12T22:36:31,070 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=194.56 KB/199230 for 060ff996d98de0b1a764cdfe36e5b58b in 1366ms, sequenceid=248, compaction requested=false 2024-12-12T22:36:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:31,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=97}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=97 2024-12-12T22:36:31,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=97 2024-12-12T22:36:31,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-12T22:36:31,074 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5230 sec 2024-12-12T22:36:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:31,076 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=201.27 KB heapSize=528.09 KB 2024-12-12T22:36:31,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043051070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:31,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:31,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:31,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:31,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043051070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:31,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:31,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043051078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=96, table=TestAcidGuarantees in 1.5360 sec 2024-12-12T22:36:31,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212445a23ddc4904345b0918638a7414b91_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:31,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742212_1388 (size=14994) 2024-12-12T22:36:31,145 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:31,172 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212445a23ddc4904345b0918638a7414b91_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445a23ddc4904345b0918638a7414b91_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:31,180 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/8af79d6f4fec4b3fa6c1dc0bb3651004, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:31,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/8af79d6f4fec4b3fa6c1dc0bb3651004 is 175, key is test_row_0/A:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:31,188 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043051182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742213_1389 (size=39949) 2024-12-12T22:36:31,232 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T22:36:31,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043051391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41104 deadline: 1734043051563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41102 deadline: 1734043051565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41060 deadline: 1734043051579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41032 deadline: 1734043051581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,628 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=282, memsize=67.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/8af79d6f4fec4b3fa6c1dc0bb3651004 2024-12-12T22:36:31,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/6face68bb1ca40c681085b816521cb19 is 50, key is test_row_0/B:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-12T22:36:31,670 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-12T22:36:31,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:31,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees 2024-12-12T22:36:31,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:31,678 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:31,678 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=98, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:31,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:31,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742214_1390 (size=12301) 2024-12-12T22:36:31,704 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:31,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043051704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:31,831 DEBUG [Thread-1452 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:50645 2024-12-12T22:36:31,831 DEBUG [Thread-1452 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:31,832 DEBUG [Thread-1456 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:50645 2024-12-12T22:36:31,832 DEBUG [Thread-1456 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:31,834 DEBUG [Thread-1454 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:50645 2024-12-12T22:36:31,834 DEBUG [Thread-1454 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-12T22:36:31,836 DEBUG [Thread-1460 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x39387e4d to 127.0.0.1:50645 2024-12-12T22:36:31,836 DEBUG [Thread-1460 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:31,837 DEBUG [Thread-1458 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x627cad17 to 127.0.0.1:50645 2024-12-12T22:36:31,837 DEBUG [Thread-1458 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:31,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:31,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:31,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:31,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:31,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:31,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:31,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:31,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:31,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:31,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:31,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:32,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/6face68bb1ca40c681085b816521cb19 2024-12-12T22:36:32,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/236cd059113947f085ad26c4ee32a7fb is 50, key is test_row_0/C:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:32,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742215_1391 (size=12301) 2024-12-12T22:36:32,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:32,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:32,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:32,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:32,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41078 deadline: 1734043052210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:32,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:32,302 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:32,302 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:32,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:32,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:32,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,455 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:32,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:32,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. as already flushing 2024-12-12T22:36:32,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] handler.RSProcedureHandler(58): pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=99 java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=99 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:32,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=67.09 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/236cd059113947f085ad26c4ee32a7fb 2024-12-12T22:36:32,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/8af79d6f4fec4b3fa6c1dc0bb3651004 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004 2024-12-12T22:36:32,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004, entries=200, sequenceid=282, filesize=39.0 K 2024-12-12T22:36:32,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/6face68bb1ca40c681085b816521cb19 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19 2024-12-12T22:36:32,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19, entries=150, 
sequenceid=282, filesize=12.0 K 2024-12-12T22:36:32,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/236cd059113947f085ad26c4ee32a7fb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb 2024-12-12T22:36:32,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb, entries=150, sequenceid=282, filesize=12.0 K 2024-12-12T22:36:32,533 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~201.27 KB/206100, heapSize ~528.05 KB/540720, currentSize=0 B/0 for 060ff996d98de0b1a764cdfe36e5b58b in 1457ms, sequenceid=282, compaction requested=true 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:32,533 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 060ff996d98de0b1a764cdfe36e5b58b:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:32,533 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:32,534 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:32,534 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:32,534 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 
060ff996d98de0b1a764cdfe36e5b58b/A is initiating minor compaction (all files) 2024-12-12T22:36:32,534 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/B is initiating minor compaction (all files) 2024-12-12T22:36:32,534 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/A in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,534 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/B in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,534 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b94c6ae2d62a40bcb48530a8d8cb12c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.9 K 2024-12-12T22:36:32,534 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=91.9 K 2024-12-12T22:36:32,534 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,534 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004] 2024-12-12T22:36:32,536 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 539454bd61fe4138b64c02f4e09892ff, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988040 2024-12-12T22:36:32,536 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b94c6ae2d62a40bcb48530a8d8cb12c9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988040 2024-12-12T22:36:32,536 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2640eca9309e417d96b23e34bd69d010, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734042988139 2024-12-12T22:36:32,536 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0859ed9219a74ecaa90f82f154cc45fa, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734042988139 2024-12-12T22:36:32,537 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8af79d6f4fec4b3fa6c1dc0bb3651004, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734042990391 2024-12-12T22:36:32,537 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6face68bb1ca40c681085b816521cb19, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734042990429 2024-12-12T22:36:32,549 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:32,550 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#B#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:32,551 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/e98d5ea7fdd2452ab1e98dceb79f0c1a is 50, key is test_row_0/B:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:32,551 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412128eacea3729d44d69986c26dc221f7b47_060ff996d98de0b1a764cdfe36e5b58b store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:32,555 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412128eacea3729d44d69986c26dc221f7b47_060ff996d98de0b1a764cdfe36e5b58b, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:32,555 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128eacea3729d44d69986c26dc221f7b47_060ff996d98de0b1a764cdfe36e5b58b because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:32,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742216_1392 (size=12949) 2024-12-12T22:36:32,574 DEBUG [Thread-1443 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:50645 2024-12-12T22:36:32,574 DEBUG [Thread-1445 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:50645 2024-12-12T22:36:32,574 DEBUG [Thread-1445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:32,574 DEBUG [Thread-1443 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:32,587 DEBUG [Thread-1449 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:50645 2024-12-12T22:36:32,587 DEBUG [Thread-1449 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:32,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742217_1393 (size=4469) 2024-12-12T22:36:32,594 DEBUG [Thread-1441 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:50645 2024-12-12T22:36:32,594 DEBUG [Thread-1441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:32,600 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#A#compaction#325 average throughput is 0.48 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:32,600 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/5b41cc5174014c64822ce28cacbf16d9 is 175, key is test_row_0/A:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:32,608 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:32,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=99 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:32,611 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:32,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:32,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742218_1394 (size=31903) 2024-12-12T22:36:32,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128413e90343d34ab18f1e2c40ac6f2989_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_0/A:col10/1734042992593/Put/seqid=0 2024-12-12T22:36:32,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742219_1395 (size=12454) 2024-12-12T22:36:32,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=98 2024-12-12T22:36:32,977 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/e98d5ea7fdd2452ab1e98dceb79f0c1a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e98d5ea7fdd2452ab1e98dceb79f0c1a 2024-12-12T22:36:32,983 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/B of 060ff996d98de0b1a764cdfe36e5b58b into e98d5ea7fdd2452ab1e98dceb79f0c1a(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:32,983 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:32,983 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/B, priority=13, startTime=1734042992533; duration=0sec 2024-12-12T22:36:32,983 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:32,983 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:B 2024-12-12T22:36:32,983 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:33,000 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:33,001 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 060ff996d98de0b1a764cdfe36e5b58b/C is initiating minor compaction (all files) 2024-12-12T22:36:33,001 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 060ff996d98de0b1a764cdfe36e5b58b/C in TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:33,001 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32e03dec8e3e4ef2a6c9da78b866742b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp, totalSize=33.9 K 2024-12-12T22:36:33,002 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 32e03dec8e3e4ef2a6c9da78b866742b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=241, earliestPutTs=1734042988040 2024-12-12T22:36:33,002 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 670b00d7a3bf47f59bd530f6b0f5acd8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1734042988139 2024-12-12T22:36:33,002 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 236cd059113947f085ad26c4ee32a7fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1734042990429 2024-12-12T22:36:33,013 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 060ff996d98de0b1a764cdfe36e5b58b#C#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:33,013 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/8ff05db4eb5b4dd5808556b9ce2a9a71 is 50, key is test_row_0/C:col10/1734042990429/Put/seqid=0 2024-12-12T22:36:33,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742220_1396 (size=12949) 2024-12-12T22:36:33,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:33,052 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/5b41cc5174014c64822ce28cacbf16d9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/5b41cc5174014c64822ce28cacbf16d9 2024-12-12T22:36:33,055 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128413e90343d34ab18f1e2c40ac6f2989_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128413e90343d34ab18f1e2c40ac6f2989_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:33,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/25d57d51a3c5494ebf058ecb054de60e, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:33,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/25d57d51a3c5494ebf058ecb054de60e is 175, key is test_row_0/A:col10/1734042992593/Put/seqid=0 2024-12-12T22:36:33,061 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/A of 060ff996d98de0b1a764cdfe36e5b58b into 5b41cc5174014c64822ce28cacbf16d9(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:33,061 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:33,061 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/A, priority=13, startTime=1734042992533; duration=0sec 2024-12-12T22:36:33,061 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:33,061 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:A 2024-12-12T22:36:33,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742221_1397 (size=31255) 2024-12-12T22:36:33,218 DEBUG [Thread-1447 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:50645 2024-12-12T22:36:33,218 DEBUG [Thread-1447 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:33,436 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/8ff05db4eb5b4dd5808556b9ce2a9a71 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/8ff05db4eb5b4dd5808556b9ce2a9a71 2024-12-12T22:36:33,442 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 060ff996d98de0b1a764cdfe36e5b58b/C of 060ff996d98de0b1a764cdfe36e5b58b into 8ff05db4eb5b4dd5808556b9ce2a9a71(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:33,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:33,442 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b., storeName=060ff996d98de0b1a764cdfe36e5b58b/C, priority=13, startTime=1734042992533; duration=0sec 2024-12-12T22:36:33,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:33,442 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 060ff996d98de0b1a764cdfe36e5b58b:C 2024-12-12T22:36:33,467 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=289, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/25d57d51a3c5494ebf058ecb054de60e 2024-12-12T22:36:33,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/09f7f051a0e64876a76b4baa61345b40 is 50, key is test_row_0/B:col10/1734042992593/Put/seqid=0 2024-12-12T22:36:33,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742222_1398 (size=12301) 2024-12-12T22:36:33,484 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/09f7f051a0e64876a76b4baa61345b40 2024-12-12T22:36:33,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/32f72b5c6f0c40c7986b7fe31f091ea6 is 50, key is test_row_0/C:col10/1734042992593/Put/seqid=0 2024-12-12T22:36:33,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742223_1399 (size=12301) 2024-12-12T22:36:33,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:33,911 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/32f72b5c6f0c40c7986b7fe31f091ea6 2024-12-12T22:36:33,918 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/25d57d51a3c5494ebf058ecb054de60e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/25d57d51a3c5494ebf058ecb054de60e 2024-12-12T22:36:33,927 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/25d57d51a3c5494ebf058ecb054de60e, entries=150, sequenceid=289, filesize=30.5 K 2024-12-12T22:36:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/09f7f051a0e64876a76b4baa61345b40 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/09f7f051a0e64876a76b4baa61345b40 2024-12-12T22:36:33,937 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/09f7f051a0e64876a76b4baa61345b40, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T22:36:33,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/32f72b5c6f0c40c7986b7fe31f091ea6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32f72b5c6f0c40c7986b7fe31f091ea6 2024-12-12T22:36:33,942 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32f72b5c6f0c40c7986b7fe31f091ea6, entries=150, sequenceid=289, filesize=12.0 K 2024-12-12T22:36:33,944 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=6.71 KB/6870 for 060ff996d98de0b1a764cdfe36e5b58b in 1333ms, sequenceid=289, compaction requested=false 2024-12-12T22:36:33,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.HRegion(2538): Flush status journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:33,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:33,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=99}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=99 2024-12-12T22:36:33,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=99 2024-12-12T22:36:33,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-12-12T22:36:33,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2670 sec 2024-12-12T22:36:33,950 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=98, table=TestAcidGuarantees in 2.2770 sec 2024-12-12T22:36:35,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-12T22:36:35,781 INFO [Thread-1451 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 11 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 13 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 12 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 93 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1118 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3352 rows 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1137 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3407 rows 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1115 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3345 rows 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1107 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3321 rows 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1123 2024-12-12T22:36:35,781 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 3369 rows 2024-12-12T22:36:35,781 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T22:36:35,781 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:50645 2024-12-12T22:36:35,782 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:36:35,784 INFO [Time-limited test {}] 
client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T22:36:35,785 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T22:36:35,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:35,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:35,791 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042995790"}]},"ts":"1734042995790"} 2024-12-12T22:36:35,793 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T22:36:35,804 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:36:35,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:36:35,810 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, UNASSIGN}] 2024-12-12T22:36:35,816 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=102, ppid=101, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, UNASSIGN 2024-12-12T22:36:35,818 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=102 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:35,823 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:36:35,823 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; CloseRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:35,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:35,977 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:35,978 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] handler.UnassignRegionHandler(124): Close 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:35,978 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:36:35,978 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1681): Closing 060ff996d98de0b1a764cdfe36e5b58b, disabling compactions & flushes 2024-12-12T22:36:35,978 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 
{event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:35,978 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:35,978 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. after waiting 0 ms 2024-12-12T22:36:35,978 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 2024-12-12T22:36:35,978 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(2837): Flushing 060ff996d98de0b1a764cdfe36e5b58b 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=A 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=B 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 060ff996d98de0b1a764cdfe36e5b58b, store=C 2024-12-12T22:36:35,979 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:35,987 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212515d6a99ffcc4c0883a1c695534f47dc_060ff996d98de0b1a764cdfe36e5b58b is 50, key is test_row_1/A:col10/1734042993217/Put/seqid=0 2024-12-12T22:36:35,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742224_1400 (size=7374) 2024-12-12T22:36:36,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:36,203 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004] to archive 2024-12-12T22:36:36,208 DEBUG 
[RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:36:36,217 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c129e2704b8442d893abb8664360d687 2024-12-12T22:36:36,217 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/3c125955fd4c4fd290b541a53e7579c2 2024-12-12T22:36:36,217 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/111c38ff52114e71835751ac7803948b 2024-12-12T22:36:36,217 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/f1ff543fba404df7841796b48c119766 2024-12-12T22:36:36,217 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/c881940a475f42ecb12b78a0ebdc645f 2024-12-12T22:36:36,218 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/6b740f5c3ae7406781bf06468cc31132 2024-12-12T22:36:36,220 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2cab89943ada4c04bb7b455d44a7bf92 2024-12-12T22:36:36,220 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/573e7fc8afce42fb9afb18a9e4268031 2024-12-12T22:36:36,222 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/9396072892884435834270deb3aa6223 2024-12-12T22:36:36,222 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2404e80cb37a479cb68f40037151cff8 2024-12-12T22:36:36,222 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/0a0a82ef82674caf9e7af99c006c5a39 2024-12-12T22:36:36,223 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/cd6711078d964016a3d626472b0c1c26 2024-12-12T22:36:36,223 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/31c4b410a0f64ffa84f87bc0ef189741 2024-12-12T22:36:36,223 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/bdac465a499d43eb8ca9113536114ce6 2024-12-12T22:36:36,223 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/d383e4f8b3cc4d789f35015784b89608 2024-12-12T22:36:36,225 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/539454bd61fe4138b64c02f4e09892ff 2024-12-12T22:36:36,225 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/2640eca9309e417d96b23e34bd69d010 2024-12-12T22:36:36,227 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/a6476cd8563742bf8e2e38552c03ce0c 2024-12-12T22:36:36,228 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/8af79d6f4fec4b3fa6c1dc0bb3651004 2024-12-12T22:36:36,240 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/2acda721638c4ea99b415fa75c59360c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/47df93af8eca480ca08d38cc6ccb324a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e9e1d60d889a41a98f7aa0875192cd20, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/63822721ad7f4649b42e3e5d9d11dd56, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b94c6ae2d62a40bcb48530a8d8cb12c9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19] to archive 2024-12-12T22:36:36,241 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:36:36,247 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/41370f4404be4004a4254d146038b167 2024-12-12T22:36:36,248 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/972bbb732137449897e3f70c6a0b07fb 2024-12-12T22:36:36,249 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b14dbabc344c4046832a4698af11b2a2 2024-12-12T22:36:36,250 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/c25bf4f4e34f4ffc97ef6ca3327b893e 2024-12-12T22:36:36,251 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/2acda721638c4ea99b415fa75c59360c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/2acda721638c4ea99b415fa75c59360c 2024-12-12T22:36:36,251 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/355faa90cac445c0a7dd71d5f193dd65 2024-12-12T22:36:36,253 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/16e3681103c8492e97fc82321b6ede3a 2024-12-12T22:36:36,254 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/52b0ac11b77948a7a728722e0026f500 2024-12-12T22:36:36,254 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e9e1d60d889a41a98f7aa0875192cd20 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e9e1d60d889a41a98f7aa0875192cd20 2024-12-12T22:36:36,255 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/074467a11cd34b689de507a55733d961 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/f7b4a708f91449d495cf499b498c8d1c 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/cf0a76ca4a484812aad00f94963b76c7 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/ea5a32d7a8b04f60904f2c75d7160b81 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/63822721ad7f4649b42e3e5d9d11dd56 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/63822721ad7f4649b42e3e5d9d11dd56 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/47df93af8eca480ca08d38cc6ccb324a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/47df93af8eca480ca08d38cc6ccb324a 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b94c6ae2d62a40bcb48530a8d8cb12c9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/b94c6ae2d62a40bcb48530a8d8cb12c9 2024-12-12T22:36:36,257 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/1eb7de5ca90242f0a4257fb165789e19 2024-12-12T22:36:36,258 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/0859ed9219a74ecaa90f82f154cc45fa 2024-12-12T22:36:36,259 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/6face68bb1ca40c681085b816521cb19 2024-12-12T22:36:36,263 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/99ed3cb854054e39b2d8744258c577c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bd78db39c33d498ba29b5b5681479180, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/18361bd0d9664237a10f284e86a0e128, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/f6a301c15de84b65a9fe83d8f5f41817, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32e03dec8e3e4ef2a6c9da78b866742b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb] to archive 2024-12-12T22:36:36,267 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/1aef280cf0a8:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:36:36,284 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bd78db39c33d498ba29b5b5681479180 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bd78db39c33d498ba29b5b5681479180 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/26b311ad813c4c2f9e11348c0518f673 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/99ed3cb854054e39b2d8744258c577c2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/99ed3cb854054e39b2d8744258c577c2 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/65c4d7a5b3154e65bb455c9ceea0cda2 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/cdd37298653e474f96e552accf147a7f 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/7e19b0fdad1247d593a9866c75a406a8 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/61732b4e1c9c463b8b9136583b4a7b02 2024-12-12T22:36:36,284 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/b801185a7329429dbc3bdb4354fb16df 2024-12-12T22:36:36,290 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/18361bd0d9664237a10f284e86a0e128 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/18361bd0d9664237a10f284e86a0e128 2024-12-12T22:36:36,290 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/6df9902d79c84c84943b62c50c97639f 2024-12-12T22:36:36,291 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/e24fdc073c8f483ab434d7fa7fc04075 2024-12-12T22:36:36,291 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/ba01147176a040e7982c6e4d3e25f897 2024-12-12T22:36:36,291 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/f6a301c15de84b65a9fe83d8f5f41817 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/f6a301c15de84b65a9fe83d8f5f41817 2024-12-12T22:36:36,291 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/37eede9780664b39940d3c6b715ff2f5 2024-12-12T22:36:36,291 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32e03dec8e3e4ef2a6c9da78b866742b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32e03dec8e3e4ef2a6c9da78b866742b 2024-12-12T22:36:36,293 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/d0b7f913e38e4e94a3522cafe4aaa40a 2024-12-12T22:36:36,302 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/236cd059113947f085ad26c4ee32a7fb 2024-12-12T22:36:36,302 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/670b00d7a3bf47f59bd530f6b0f5acd8 2024-12-12T22:36:36,302 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/bb99b6ecfdd646fcacd49b6d4d6d97d8 2024-12-12T22:36:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:36,395 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:36,408 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212515d6a99ffcc4c0883a1c695534f47dc_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212515d6a99ffcc4c0883a1c695534f47dc_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:36,410 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/625d4ecd647b445c9c87413ad30c9a70, store: [table=TestAcidGuarantees family=A region=060ff996d98de0b1a764cdfe36e5b58b] 2024-12-12T22:36:36,411 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/625d4ecd647b445c9c87413ad30c9a70 is 175, key is test_row_1/A:col10/1734042993217/Put/seqid=0 2024-12-12T22:36:36,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742225_1401 (size=13865) 2024-12-12T22:36:36,449 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=296, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/625d4ecd647b445c9c87413ad30c9a70 2024-12-12T22:36:36,461 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/4b60516746194d7498ca96729a4cd09e is 50, key is test_row_1/B:col10/1734042993217/Put/seqid=0 2024-12-12T22:36:36,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742226_1402 (size=7415) 2024-12-12T22:36:36,468 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/4b60516746194d7498ca96729a4cd09e 2024-12-12T22:36:36,497 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/fd2eb2fb510547e1bc2c47395994835c is 50, key is test_row_1/C:col10/1734042993217/Put/seqid=0 2024-12-12T22:36:36,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742227_1403 (size=7415) 2024-12-12T22:36:36,527 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=296 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/fd2eb2fb510547e1bc2c47395994835c 2024-12-12T22:36:36,551 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/A/625d4ecd647b445c9c87413ad30c9a70 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/625d4ecd647b445c9c87413ad30c9a70 2024-12-12T22:36:36,567 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/625d4ecd647b445c9c87413ad30c9a70, entries=50, sequenceid=296, filesize=13.5 K 2024-12-12T22:36:36,572 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/B/4b60516746194d7498ca96729a4cd09e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/4b60516746194d7498ca96729a4cd09e 2024-12-12T22:36:36,587 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/4b60516746194d7498ca96729a4cd09e, entries=50, sequenceid=296, filesize=7.2 K 2024-12-12T22:36:36,591 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/.tmp/C/fd2eb2fb510547e1bc2c47395994835c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/fd2eb2fb510547e1bc2c47395994835c 2024-12-12T22:36:36,595 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/fd2eb2fb510547e1bc2c47395994835c, entries=50, sequenceid=296, filesize=7.2 K 2024-12-12T22:36:36,599 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 060ff996d98de0b1a764cdfe36e5b58b in 621ms, sequenceid=296, compaction requested=true 2024-12-12T22:36:36,630 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits/299.seqid, newMaxSeqId=299, maxSeqId=4 2024-12-12T22:36:36,631 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b. 
2024-12-12T22:36:36,631 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] regionserver.HRegion(1635): Region close journal for 060ff996d98de0b1a764cdfe36e5b58b: 2024-12-12T22:36:36,637 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=102 updating hbase:meta row=060ff996d98de0b1a764cdfe36e5b58b, regionState=CLOSED 2024-12-12T22:36:36,639 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=103}] handler.UnassignRegionHandler(170): Closed 060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:36,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-12T22:36:36,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; CloseRegionProcedure 060ff996d98de0b1a764cdfe36e5b58b, server=1aef280cf0a8,36025,1734042873576 in 815 msec 2024-12-12T22:36:36,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-12T22:36:36,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=060ff996d98de0b1a764cdfe36e5b58b, UNASSIGN in 831 msec 2024-12-12T22:36:36,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-12T22:36:36,648 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 841 msec 2024-12-12T22:36:36,649 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042996649"}]},"ts":"1734042996649"} 2024-12-12T22:36:36,656 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:36:36,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:36,934 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T22:36:36,937 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1500 sec 2024-12-12T22:36:37,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-12T22:36:37,900 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-12T22:36:37,901 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:36:37,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:37,905 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=104, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:37,906 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=104, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T22:36:37,908 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:37,932 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits] 2024-12-12T22:36:37,977 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/25d57d51a3c5494ebf058ecb054de60e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/25d57d51a3c5494ebf058ecb054de60e 2024-12-12T22:36:37,977 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/625d4ecd647b445c9c87413ad30c9a70 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/625d4ecd647b445c9c87413ad30c9a70 2024-12-12T22:36:37,979 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/5b41cc5174014c64822ce28cacbf16d9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/A/5b41cc5174014c64822ce28cacbf16d9 2024-12-12T22:36:37,995 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/09f7f051a0e64876a76b4baa61345b40 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/09f7f051a0e64876a76b4baa61345b40 2024-12-12T22:36:37,996 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/4b60516746194d7498ca96729a4cd09e to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/4b60516746194d7498ca96729a4cd09e 2024-12-12T22:36:37,996 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e98d5ea7fdd2452ab1e98dceb79f0c1a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/B/e98d5ea7fdd2452ab1e98dceb79f0c1a 2024-12-12T22:36:38,000 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/fd2eb2fb510547e1bc2c47395994835c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/fd2eb2fb510547e1bc2c47395994835c 2024-12-12T22:36:38,000 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32f72b5c6f0c40c7986b7fe31f091ea6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/32f72b5c6f0c40c7986b7fe31f091ea6 2024-12-12T22:36:38,001 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/8ff05db4eb5b4dd5808556b9ce2a9a71 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/C/8ff05db4eb5b4dd5808556b9ce2a9a71 2024-12-12T22:36:38,005 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits/299.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b/recovered.edits/299.seqid 2024-12-12T22:36:38,006 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,006 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:36:38,007 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:36:38,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T22:36:38,008 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T22:36:38,019 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121217d5ace6c06f4916b1b5c4752e533e1a_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121217d5ace6c06f4916b1b5c4752e533e1a_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,019 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121225c2af21106b4ab4a77e0e42cc2b57d7_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121225c2af21106b4ab4a77e0e42cc2b57d7_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,020 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121220c17a0880fe4cc89afeb3f7ef8b4e95_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121220c17a0880fe4cc89afeb3f7ef8b4e95_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,020 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212515d6a99ffcc4c0883a1c695534f47dc_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212515d6a99ffcc4c0883a1c695534f47dc_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,020 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445a23ddc4904345b0918638a7414b91_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212445a23ddc4904345b0918638a7414b91_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,020 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121251741dbda29c489abbe3054074906d2e_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121251741dbda29c489abbe3054074906d2e_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,021 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123a6c69ba1e6147feb9077d821ad16c18_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123a6c69ba1e6147feb9077d821ad16c18_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b4db2c711d9498a9e7998561d983f34_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126b4db2c711d9498a9e7998561d983f34_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126cf4f052b43d4e8d96d21661f40311be_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412126cf4f052b43d4e8d96d21661f40311be_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128413e90343d34ab18f1e2c40ac6f2989_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128413e90343d34ab18f1e2c40ac6f2989_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289d93e8198454ad894268398f656f47d_060ff996d98de0b1a764cdfe36e5b58b to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121289d93e8198454ad894268398f656f47d_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f3f81192ad804df996e96d9e7c3b066a_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f3f81192ad804df996e96d9e7c3b066a_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212dcff3e2ad413458497d0f48d153bed5f_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212dcff3e2ad413458497d0f48d153bed5f_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,022 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ec4553355b784172a7a4173f0c302daa_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212ec4553355b784172a7a4173f0c302daa_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,023 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125e101486f7f34df5976cf1b509870879_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125e101486f7f34df5976cf1b509870879_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,023 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd00ce6ecb614c3495f1c59258fdda51_060ff996d98de0b1a764cdfe36e5b58b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212fd00ce6ecb614c3495f1c59258fdda51_060ff996d98de0b1a764cdfe36e5b58b 2024-12-12T22:36:38,024 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:36:38,027 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=104, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:38,030 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:36:38,037 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T22:36:38,038 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=104, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:38,038 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T22:36:38,038 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734042998038"}]},"ts":"9223372036854775807"} 2024-12-12T22:36:38,040 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:36:38,040 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 060ff996d98de0b1a764cdfe36e5b58b, NAME => 'TestAcidGuarantees,,1734042969622.060ff996d98de0b1a764cdfe36e5b58b.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:36:38,040 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T22:36:38,040 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734042998040"}]},"ts":"9223372036854775807"} 2024-12-12T22:36:38,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:36:38,060 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=104, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:38,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 159 msec 2024-12-12T22:36:38,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-12T22:36:38,208 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-12T22:36:38,220 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=246 (was 243) - Thread LEAK? -, OpenFileDescriptor=463 (was 449) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1631 (was 1480) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4070 (was 4068) - AvailableMemoryMB LEAK? 
- 2024-12-12T22:36:38,230 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=246, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=1631, ProcessCount=11, AvailableMemoryMB=4068 2024-12-12T22:36:38,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T22:36:38,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:36:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:36:38,233 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:36:38,233 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:38,233 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 105 2024-12-12T22:36:38,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-12-12T22:36:38,234 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:36:38,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742228_1404 (size=963) 2024-12-12T22:36:38,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-12-12T22:36:38,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=105 2024-12-12T22:36:38,655 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:36:38,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742229_1405 (size=53) 2024-12-12T22:36:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-12-12T22:36:39,063 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:36:39,064 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing ead55ce3707e32db5ec1e629ea38c388, disabling compactions & flushes 2024-12-12T22:36:39,064 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:39,064 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:39,064 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. after waiting 0 ms 2024-12-12T22:36:39,064 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:39,064 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:39,064 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:39,065 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:36:39,065 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734042999065"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734042999065"}]},"ts":"1734042999065"} 2024-12-12T22:36:39,067 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T22:36:39,068 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:36:39,068 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042999068"}]},"ts":"1734042999068"} 2024-12-12T22:36:39,073 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:36:39,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, ASSIGN}] 2024-12-12T22:36:39,101 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, ASSIGN 2024-12-12T22:36:39,102 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:36:39,253 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=106 updating hbase:meta row=ead55ce3707e32db5ec1e629ea38c388, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:39,254 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; OpenRegionProcedure ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:36:39,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-12-12T22:36:39,406 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:39,410 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:39,410 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7285): Opening region: {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:36:39,410 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,410 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:36:39,410 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7327): checking encryption for ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,410 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(7330): checking classloading for ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,412 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,420 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:39,420 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ead55ce3707e32db5ec1e629ea38c388 columnFamilyName A 2024-12-12T22:36:39,421 DEBUG [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:39,421 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(327): Store=ead55ce3707e32db5ec1e629ea38c388/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:39,421 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,423 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:39,423 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ead55ce3707e32db5ec1e629ea38c388 columnFamilyName B 2024-12-12T22:36:39,423 DEBUG [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:39,424 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(327): Store=ead55ce3707e32db5ec1e629ea38c388/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:39,424 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,426 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:36:39,426 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ead55ce3707e32db5ec1e629ea38c388 columnFamilyName C 2024-12-12T22:36:39,426 DEBUG [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:36:39,426 INFO [StoreOpener-ead55ce3707e32db5ec1e629ea38c388-1 {}] regionserver.HStore(327): Store=ead55ce3707e32db5ec1e629ea38c388/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:36:39,427 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:39,428 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,428 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,430 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:36:39,431 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1085): writing seq id for ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:39,439 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:36:39,441 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1102): Opened ead55ce3707e32db5ec1e629ea38c388; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67215356, jitterRate=0.0015868544578552246}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:36:39,442 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegion(1001): Region open journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:39,451 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., pid=107, masterSystemTime=1734042999406 2024-12-12T22:36:39,463 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:39,464 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=107}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:39,471 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=106 updating hbase:meta row=ead55ce3707e32db5ec1e629ea38c388, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:39,475 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-12T22:36:39,475 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; OpenRegionProcedure ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 in 219 msec 2024-12-12T22:36:39,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=106, resume processing ppid=105 2024-12-12T22:36:39,478 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=105, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, ASSIGN in 376 msec 2024-12-12T22:36:39,478 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:36:39,479 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734042999479"}]},"ts":"1734042999479"} 2024-12-12T22:36:39,480 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:36:39,539 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:36:39,550 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3070 sec 2024-12-12T22:36:40,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=105 2024-12-12T22:36:40,338 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 105 completed 2024-12-12T22:36:40,340 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-12-12T22:36:40,385 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,386 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,388 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,389 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:36:40,390 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49790, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:36:40,392 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-12T22:36:40,410 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,411 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-12T22:36:40,424 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,425 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-12-12T22:36:40,444 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,445 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bc486e1 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11193a0c 2024-12-12T22:36:40,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d672ed2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,462 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2070263a to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7861b162 2024-12-12T22:36:40,472 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf40102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,472 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-12-12T22:36:40,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,530 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-12T22:36:40,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,596 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-12-12T22:36:40,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,633 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x79d49886 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@73d92042 2024-12-12T22:36:40,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c692575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,681 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x635b1751 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@593af048 2024-12-12T22:36:40,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cbd2497, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:36:40,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:40,750 DEBUG [hconnection-0x4974d6c6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,751 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34754, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,751 DEBUG [hconnection-0x1486719b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 
2024-12-12T22:36:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T22:36:40,753 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34762, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,757 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:40,758 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:40,759 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:40,762 DEBUG [hconnection-0x5c48ff7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,763 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,770 DEBUG [hconnection-0x185bbc7e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,773 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,779 DEBUG [hconnection-0x7758bcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,784 DEBUG [hconnection-0x43b8f741-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:40,784 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34794, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:36:40,787 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:40,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:40,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:40,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-12T22:36:40,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:40,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:40,796 DEBUG [hconnection-0x43938c52-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,798 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,799 DEBUG [hconnection-0x2c21c92-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,801 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,807 DEBUG [hconnection-0x1412a397-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,813 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,814 DEBUG [hconnection-0x2757d039-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:36:40,820 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:36:40,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/5f6a690bbf104d24930fe60a55e265b6 is 50, key is test_row_0/A:col10/1734043000784/Put/seqid=0 2024-12-12T22:36:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T22:36:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043060864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043060865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043060871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043060872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043060872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742230_1406 (size=12001) 2024-12-12T22:36:40,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/5f6a690bbf104d24930fe60a55e265b6 2024-12-12T22:36:40,914 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T22:36:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:40,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:40,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:40,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:40,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:40,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:40,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/077e54bd055541b0877ed2a02f395684 is 50, key is test_row_0/B:col10/1734043000784/Put/seqid=0 2024-12-12T22:36:40,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043060976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043060976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043060976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,979 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043060978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:40,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:40,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043060983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742231_1407 (size=12001) 2024-12-12T22:36:41,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/077e54bd055541b0877ed2a02f395684 2024-12-12T22:36:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T22:36:41,075 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T22:36:41,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:41,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:41,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/45c9daef44644420ad4c3b40451b8152 is 50, key is test_row_0/C:col10/1734043000784/Put/seqid=0 2024-12-12T22:36:41,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742232_1408 (size=12001) 2024-12-12T22:36:41,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043061181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043061183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043061183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/45c9daef44644420ad4c3b40451b8152 2024-12-12T22:36:41,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043061188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043061189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/5f6a690bbf104d24930fe60a55e265b6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6 2024-12-12T22:36:41,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T22:36:41,239 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/077e54bd055541b0877ed2a02f395684 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684 2024-12-12T22:36:41,243 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T22:36:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:41,247 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:41,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T22:36:41,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/45c9daef44644420ad4c3b40451b8152 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152 2024-12-12T22:36:41,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152, entries=150, sequenceid=14, filesize=11.7 K 2024-12-12T22:36:41,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for ead55ce3707e32db5ec1e629ea38c388 in 525ms, sequenceid=14, compaction requested=false 2024-12-12T22:36:41,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:41,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T22:36:41,415 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,421 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-12T22:36:41,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:41,421 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:41,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3ccd56fbf3cb4cff9acb43b087818e88 is 50, key is test_row_0/A:col10/1734043000864/Put/seqid=0 2024-12-12T22:36:41,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:41,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:41,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742233_1409 (size=12001) 2024-12-12T22:36:41,517 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3ccd56fbf3cb4cff9acb43b087818e88 2024-12-12T22:36:41,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043061543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043061545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043061545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043061546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043061551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/d410030255a64a93bfb75a7ea07af751 is 50, key is test_row_0/B:col10/1734043000864/Put/seqid=0 2024-12-12T22:36:41,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742234_1410 (size=12001) 2024-12-12T22:36:41,619 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/d410030255a64a93bfb75a7ea07af751 2024-12-12T22:36:41,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043061657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043061658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/110576b3cc6e4d22b60ea610bda9b6ed is 50, key is test_row_0/C:col10/1734043000864/Put/seqid=0 2024-12-12T22:36:41,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043061666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043061668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043061668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742235_1411 (size=12001) 2024-12-12T22:36:41,719 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/110576b3cc6e4d22b60ea610bda9b6ed 2024-12-12T22:36:41,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3ccd56fbf3cb4cff9acb43b087818e88 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88 2024-12-12T22:36:41,784 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:36:41,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/d410030255a64a93bfb75a7ea07af751 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751 2024-12-12T22:36:41,802 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:36:41,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/110576b3cc6e4d22b60ea610bda9b6ed as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed 2024-12-12T22:36:41,812 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed, entries=150, sequenceid=38, filesize=11.7 K 2024-12-12T22:36:41,819 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for ead55ce3707e32db5ec1e629ea38c388 in 398ms, sequenceid=38, compaction requested=false 2024-12-12T22:36:41,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:41,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-12T22:36:41,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-12T22:36:41,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-12T22:36:41,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0690 sec 2024-12-12T22:36:41,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.0850 sec 2024-12-12T22:36:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-12T22:36:41,863 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-12T22:36:41,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:41,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-12T22:36:41,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:41,875 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:41,876 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:41,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:41,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:41,881 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:41,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:41,913 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/826b28d8b73f437a9369c25955f94e1d is 50, key is test_row_0/A:col10/1734043001543/Put/seqid=0 2024-12-12T22:36:41,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742236_1412 (size=12001) 2024-12-12T22:36:41,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/826b28d8b73f437a9369c25955f94e1d 2024-12-12T22:36:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:41,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043061958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043061958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043061972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043061974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:41,986 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:41,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043061974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0d8519bdec6544a38311285073d08589 is 50, key is test_row_0/B:col10/1734043001543/Put/seqid=0 2024-12-12T22:36:42,034 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742237_1413 (size=12001) 2024-12-12T22:36:42,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043062075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0d8519bdec6544a38311285073d08589 2024-12-12T22:36:42,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043062086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043062086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043062090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043062095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/0e1ccc82ab0943bc8ceaa1073dedf48d is 50, key is test_row_0/C:col10/1734043001543/Put/seqid=0 2024-12-12T22:36:42,167 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:36:42,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742238_1414 (size=12001) 2024-12-12T22:36:42,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:42,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043062283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043062292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043062292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043062303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043062308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,382 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,384 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:42,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:42,537 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,538 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/0e1ccc82ab0943bc8ceaa1073dedf48d 2024-12-12T22:36:42,600 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043062596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043062600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043062607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/826b28d8b73f437a9369c25955f94e1d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d 2024-12-12T22:36:42,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043062612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:42,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043062612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d, entries=150, sequenceid=53, filesize=11.7 K 2024-12-12T22:36:42,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0d8519bdec6544a38311285073d08589 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589 2024-12-12T22:36:42,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589, entries=150, sequenceid=53, filesize=11.7 K 2024-12-12T22:36:42,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/0e1ccc82ab0943bc8ceaa1073dedf48d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d 2024-12-12T22:36:42,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,697 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
as already flushing 2024-12-12T22:36:42,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,697 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:42,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d, entries=150, sequenceid=53, filesize=11.7 K 2024-12-12T22:36:42,706 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for ead55ce3707e32db5ec1e629ea38c388 in 825ms, sequenceid=53, compaction requested=true 2024-12-12T22:36:42,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:42,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:42,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:42,707 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:42,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:42,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:42,707 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:42,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:42,707 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:42,708 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:42,708 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:42,708 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,708 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.2 K 2024-12-12T22:36:42,708 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:42,708 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:42,708 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:42,709 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.2 K 2024-12-12T22:36:42,714 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 077e54bd055541b0877ed2a02f395684, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734043000773 2024-12-12T22:36:42,714 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f6a690bbf104d24930fe60a55e265b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734043000773 2024-12-12T22:36:42,715 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ccd56fbf3cb4cff9acb43b087818e88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734043000823 2024-12-12T22:36:42,715 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d410030255a64a93bfb75a7ea07af751, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734043000823 2024-12-12T22:36:42,715 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 826b28d8b73f437a9369c25955f94e1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734043001543 2024-12-12T22:36:42,718 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d8519bdec6544a38311285073d08589, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734043001543 2024-12-12T22:36:42,740 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:42,741 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/cd56c5067fc14891a46265f11a9b5eed is 50, key is test_row_0/A:col10/1734043001543/Put/seqid=0 2024-12-12T22:36:42,741 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#343 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:42,741 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5570490a091044bf86806134abd41a29 is 50, key is test_row_0/B:col10/1734043001543/Put/seqid=0 2024-12-12T22:36:42,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742239_1415 (size=12104) 2024-12-12T22:36:42,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742240_1416 (size=12104) 2024-12-12T22:36:42,851 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:42,853 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:42,854 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:42,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:42,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/37a2d379e4be4e349535c7bb1408fd58 is 50, key is test_row_0/A:col10/1734043001967/Put/seqid=0 2024-12-12T22:36:42,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742241_1417 
(size=12001) 2024-12-12T22:36:42,913 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/37a2d379e4be4e349535c7bb1408fd58 2024-12-12T22:36:42,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a90b06d6438a42e7b0b211a1206de583 is 50, key is test_row_0/B:col10/1734043001967/Put/seqid=0 2024-12-12T22:36:42,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742242_1418 (size=12001) 2024-12-12T22:36:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:42,987 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a90b06d6438a42e7b0b211a1206de583 2024-12-12T22:36:43,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/d186c4f3fed34b8295f8e9924f365dd3 is 50, key is test_row_0/C:col10/1734043001967/Put/seqid=0 2024-12-12T22:36:43,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742243_1419 (size=12001) 2024-12-12T22:36:43,071 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/d186c4f3fed34b8295f8e9924f365dd3 2024-12-12T22:36:43,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/37a2d379e4be4e349535c7bb1408fd58 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58 2024-12-12T22:36:43,108 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58, 
entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T22:36:43,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:43,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:43,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a90b06d6438a42e7b0b211a1206de583 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583 2024-12-12T22:36:43,128 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583, entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T22:36:43,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/d186c4f3fed34b8295f8e9924f365dd3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3 2024-12-12T22:36:43,148 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3, entries=150, sequenceid=75, filesize=11.7 K 2024-12-12T22:36:43,149 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=67.09 KB/68700 for ead55ce3707e32db5ec1e629ea38c388 in 295ms, sequenceid=75, compaction requested=true 2024-12-12T22:36:43,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:43,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:43,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-12T22:36:43,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-12T22:36:43,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:43,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:43,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-12T22:36:43,161 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2820 sec 2024-12-12T22:36:43,163 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.2930 sec 2024-12-12T22:36:43,182 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a38d53792f87408aa5c7b65f5ed24de7 is 50, key is test_row_0/A:col10/1734043003145/Put/seqid=0 2024-12-12T22:36:43,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742244_1420 (size=16681) 2024-12-12T22:36:43,229 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/cd56c5067fc14891a46265f11a9b5eed as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cd56c5067fc14891a46265f11a9b5eed 2024-12-12T22:36:43,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a38d53792f87408aa5c7b65f5ed24de7 
2024-12-12T22:36:43,260 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into cd56c5067fc14891a46265f11a9b5eed(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:43,260 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:43,260 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043002706; duration=0sec 2024-12-12T22:36:43,261 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:43,261 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:43,261 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:43,266 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5570490a091044bf86806134abd41a29 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5570490a091044bf86806134abd41a29 2024-12-12T22:36:43,270 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:43,270 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:43,270 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:43,270 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=46.9 K 2024-12-12T22:36:43,271 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 45c9daef44644420ad4c3b40451b8152, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734043000773 2024-12-12T22:36:43,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5b55edb621e24bcfa9856f162731306f is 50, key is test_row_0/B:col10/1734043003145/Put/seqid=0 2024-12-12T22:36:43,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043063250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043063264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043063264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043063266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043063270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,285 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 110576b3cc6e4d22b60ea610bda9b6ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734043000823 2024-12-12T22:36:43,287 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e1ccc82ab0943bc8ceaa1073dedf48d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734043001543 2024-12-12T22:36:43,291 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d186c4f3fed34b8295f8e9924f365dd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734043001890 2024-12-12T22:36:43,304 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 5570490a091044bf86806134abd41a29(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:43,304 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:43,304 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043002707; duration=0sec 2024-12-12T22:36:43,304 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:43,304 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:43,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742245_1421 (size=12001) 2024-12-12T22:36:43,327 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#349 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:43,327 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f63dcab337df4b5aa1ceb0921e947339 is 50, key is test_row_0/C:col10/1734043001967/Put/seqid=0 2024-12-12T22:36:43,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5b55edb621e24bcfa9856f162731306f 2024-12-12T22:36:43,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/4eb4ae51f0904b4c9cea7250cfca9b57 is 50, key is test_row_0/C:col10/1734043003145/Put/seqid=0 2024-12-12T22:36:43,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742246_1422 (size=12139) 2024-12-12T22:36:43,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043063383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043063383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742247_1423 (size=12001) 2024-12-12T22:36:43,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043063384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043063385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043063385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,422 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f63dcab337df4b5aa1ceb0921e947339 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f63dcab337df4b5aa1ceb0921e947339 2024-12-12T22:36:43,435 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into f63dcab337df4b5aa1ceb0921e947339(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:43,435 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:43,435 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=12, startTime=1734043002707; duration=0sec 2024-12-12T22:36:43,435 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:43,435 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:43,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043063594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043063594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043063594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043063598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043063598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/4eb4ae51f0904b4c9cea7250cfca9b57 2024-12-12T22:36:43,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a38d53792f87408aa5c7b65f5ed24de7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7 2024-12-12T22:36:43,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7, entries=250, sequenceid=90, filesize=16.3 K 2024-12-12T22:36:43,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5b55edb621e24bcfa9856f162731306f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f 2024-12-12T22:36:43,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f, entries=150, sequenceid=90, filesize=11.7 K 2024-12-12T22:36:43,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/4eb4ae51f0904b4c9cea7250cfca9b57 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57 2024-12-12T22:36:43,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57, entries=150, sequenceid=90, filesize=11.7 K 2024-12-12T22:36:43,861 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for ead55ce3707e32db5ec1e629ea38c388 in 706ms, sequenceid=90, compaction requested=true 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:43,861 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:43,861 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:43,861 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:43,864 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:43,864 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction 
(all files) 2024-12-12T22:36:43,864 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:43,864 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cd56c5067fc14891a46265f11a9b5eed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=39.8 K 2024-12-12T22:36:43,864 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd56c5067fc14891a46265f11a9b5eed, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734043001543 2024-12-12T22:36:43,866 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37a2d379e4be4e349535c7bb1408fd58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734043001890 2024-12-12T22:36:43,866 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:43,867 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:43,867 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:43,867 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5570490a091044bf86806134abd41a29, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.3 K 2024-12-12T22:36:43,867 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting a38d53792f87408aa5c7b65f5ed24de7, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734043003129 2024-12-12T22:36:43,868 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5570490a091044bf86806134abd41a29, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1734043001543 2024-12-12T22:36:43,868 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a90b06d6438a42e7b0b211a1206de583, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734043001890 2024-12-12T22:36:43,869 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b55edb621e24bcfa9856f162731306f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734043003145 2024-12-12T22:36:43,882 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#351 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:43,883 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/349d932a2e7b4944bb0c613d298e43a5 is 50, key is test_row_0/A:col10/1734043003145/Put/seqid=0 2024-12-12T22:36:43,887 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#352 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:43,887 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/c5b328803e9b4a2881d951e876786e99 is 50, key is test_row_0/B:col10/1734043003145/Put/seqid=0 2024-12-12T22:36:43,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:43,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:36:43,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742248_1424 (size=12207) 2024-12-12T22:36:43,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:43,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:43,923 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:43,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:43,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742249_1425 (size=12207) 2024-12-12T22:36:43,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e0b01c2914e447b4b3d0c212172f2758 is 50, key is test_row_0/A:col10/1734043003912/Put/seqid=0 2024-12-12T22:36:43,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043063946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043063951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742250_1426 (size=16681) 2024-12-12T22:36:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043063960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043063962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e0b01c2914e447b4b3d0c212172f2758 2024-12-12T22:36:43,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:43,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043063963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:43,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-12T22:36:43,983 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-12T22:36:43,999 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:44,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-12T22:36:44,001 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:44,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:44,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/eea8600a803347179e6af71b87862634 is 50, key is test_row_0/B:col10/1734043003912/Put/seqid=0 2024-12-12T22:36:44,004 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:44,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742251_1427 (size=12001) 2024-12-12T22:36:44,049 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/eea8600a803347179e6af71b87862634 
2024-12-12T22:36:44,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/2a3ef50fe04a4d4583bdbea6d5d5a403 is 50, key is test_row_0/C:col10/1734043003912/Put/seqid=0 2024-12-12T22:36:44,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043064068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043064077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043064079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043064075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043064083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:44,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742252_1428 (size=12001) 2024-12-12T22:36:44,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/2a3ef50fe04a4d4583bdbea6d5d5a403 2024-12-12T22:36:44,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e0b01c2914e447b4b3d0c212172f2758 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758 2024-12-12T22:36:44,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:44,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758, entries=250, sequenceid=118, filesize=16.3 K 2024-12-12T22:36:44,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/eea8600a803347179e6af71b87862634 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634 2024-12-12T22:36:44,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634, entries=150, sequenceid=118, filesize=11.7 K 2024-12-12T22:36:44,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/2a3ef50fe04a4d4583bdbea6d5d5a403 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403 2024-12-12T22:36:44,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403, entries=150, sequenceid=118, filesize=11.7 K 
2024-12-12T22:36:44,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ead55ce3707e32db5ec1e629ea38c388 in 309ms, sequenceid=118, compaction requested=true 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:44,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-12T22:36:44,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:44,289 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:44,289 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:44,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:44,313 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/cb40a3e180ca4aa8b0c2359d1eb75a84 is 50, key is test_row_0/A:col10/1734043003943/Put/seqid=0 2024-12-12T22:36:44,345 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New 
admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:44,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:44,370 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/349d932a2e7b4944bb0c613d298e43a5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/349d932a2e7b4944bb0c613d298e43a5 2024-12-12T22:36:44,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742253_1429 (size=12051) 2024-12-12T22:36:44,379 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/c5b328803e9b4a2881d951e876786e99 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c5b328803e9b4a2881d951e876786e99 2024-12-12T22:36:44,385 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 349d932a2e7b4944bb0c613d298e43a5(size=11.9 K), total size for store is 28.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:44,385 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:44,385 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043003861; duration=0sec 2024-12-12T22:36:44,385 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-12-12T22:36:44,385 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:44,385 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:44,385 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-12T22:36:44,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T22:36:44,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T22:36:44,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
because compaction request was cancelled 2024-12-12T22:36:44,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:44,388 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:44,392 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:44,392 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:44,392 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,392 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f63dcab337df4b5aa1ceb0921e947339, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.3 K 2024-12-12T22:36:44,395 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f63dcab337df4b5aa1ceb0921e947339, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1734043001890 2024-12-12T22:36:44,399 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4eb4ae51f0904b4c9cea7250cfca9b57, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734043003145 2024-12-12T22:36:44,400 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a3ef50fe04a4d4583bdbea6d5d5a403, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734043003262 2024-12-12T22:36:44,407 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into c5b328803e9b4a2881d951e876786e99(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:44,407 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:44,407 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043003861; duration=0sec 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 3 compacting, 0 eligible, 16 blocking 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. because compaction request was cancelled 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:44,408 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-12T22:36:44,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043064389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,409 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T22:36:44,409 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T22:36:44,409 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. because compaction request was cancelled 2024-12-12T22:36:44,409 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:44,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043064391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043064391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043064395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043064411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,437 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:44,437 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f4287dcd7b7b444ca82ef07b18dc9af2 is 50, key is test_row_0/C:col10/1734043003912/Put/seqid=0 2024-12-12T22:36:44,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742254_1430 (size=12241) 2024-12-12T22:36:44,489 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f4287dcd7b7b444ca82ef07b18dc9af2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f4287dcd7b7b444ca82ef07b18dc9af2 2024-12-12T22:36:44,503 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:44,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043064511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043064511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043064512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043064513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,527 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into f4287dcd7b7b444ca82ef07b18dc9af2(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:44,527 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:44,527 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=13, startTime=1734043004227; duration=0sec 2024-12-12T22:36:44,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043064523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,527 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:44,527 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:44,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:44,661 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043064722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043064727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,732 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043064727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043064727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,733 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:44,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043064730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/cb40a3e180ca4aa8b0c2359d1eb75a84 2024-12-12T22:36:44,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/80c0d45c0647429db88a8ffd98f9f816 is 50, key is test_row_0/B:col10/1734043003943/Put/seqid=0 2024-12-12T22:36:44,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742255_1431 (size=12051) 2024-12-12T22:36:44,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:44,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:44,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:44,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:44,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:44,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043065036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043065038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043065039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043065041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043065051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:45,139 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:45,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,280 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/80c0d45c0647429db88a8ffd98f9f816 2024-12-12T22:36:45,298 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:45,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/11d3d554e49c4307857965bf95736a9d is 50, key is test_row_0/C:col10/1734043003943/Put/seqid=0 2024-12-12T22:36:45,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742256_1432 (size=12051) 2024-12-12T22:36:45,455 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:45,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:45,459 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043065553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043065553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043065559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043065558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:45,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043065575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:45,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/11d3d554e49c4307857965bf95736a9d 2024-12-12T22:36:45,782 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:45,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:45,803 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/cb40a3e180ca4aa8b0c2359d1eb75a84 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84 2024-12-12T22:36:45,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84, entries=150, sequenceid=130, filesize=11.8 K 2024-12-12T22:36:45,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/80c0d45c0647429db88a8ffd98f9f816 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816 2024-12-12T22:36:45,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816, entries=150, sequenceid=130, filesize=11.8 K 2024-12-12T22:36:45,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/11d3d554e49c4307857965bf95736a9d as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d 2024-12-12T22:36:45,855 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d, entries=150, sequenceid=130, filesize=11.8 K 2024-12-12T22:36:45,856 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ead55ce3707e32db5ec1e629ea38c388 in 1567ms, sequenceid=130, compaction requested=true 2024-12-12T22:36:45,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:45,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:45,857 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:45,857 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:45,860 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:45,867 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40939 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:45,867 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:45,867 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:45,867 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/349d932a2e7b4944bb0c613d298e43a5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=40.0 K 2024-12-12T22:36:45,870 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 349d932a2e7b4944bb0c613d298e43a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734043003145 2024-12-12T22:36:45,872 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:45,872 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:45,872 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:45,872 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c5b328803e9b4a2881d951e876786e99, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.4 K 2024-12-12T22:36:45,872 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e0b01c2914e447b4b3d0c212172f2758, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734043003260 2024-12-12T22:36:45,873 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5b328803e9b4a2881d951e876786e99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1734043003145 2024-12-12T22:36:45,874 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting cb40a3e180ca4aa8b0c2359d1eb75a84, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734043003943 2024-12-12T22:36:45,875 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting eea8600a803347179e6af71b87862634, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734043003262 2024-12-12T22:36:45,876 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80c0d45c0647429db88a8ffd98f9f816, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734043003943 2024-12-12T22:36:45,888 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#360 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:45,889 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/db773ab7db45486da2e690a3cd3697aa is 50, key is test_row_0/A:col10/1734043003943/Put/seqid=0 2024-12-12T22:36:45,893 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#361 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:45,893 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/048a5ac0db134acf9f19b208b80fd2f0 is 50, key is test_row_0/B:col10/1734043003943/Put/seqid=0 2024-12-12T22:36:45,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742257_1433 (size=12359) 2024-12-12T22:36:45,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742258_1434 (size=12359) 2024-12-12T22:36:45,932 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/db773ab7db45486da2e690a3cd3697aa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/db773ab7db45486da2e690a3cd3697aa 2024-12-12T22:36:45,944 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/048a5ac0db134acf9f19b208b80fd2f0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/048a5ac0db134acf9f19b208b80fd2f0 2024-12-12T22:36:45,945 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into db773ab7db45486da2e690a3cd3697aa(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:45,945 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:45,945 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:45,945 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043005856; duration=0sec 2024-12-12T22:36:45,945 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:45,945 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:45,945 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-12T22:36:45,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-12T22:36:45,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:45,946 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:36:45,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:45,946 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-12T22:36:45,946 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-12T22:36:45,946 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
because compaction request was cancelled 2024-12-12T22:36:45,946 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:45,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:45,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:45,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:45,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:45,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:45,951 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 048a5ac0db134acf9f19b208b80fd2f0(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:45,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:45,952 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043005857; duration=0sec 2024-12-12T22:36:45,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:45,952 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:45,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/613d3c48347a4fde989190b2ab6b7ec5 is 50, key is test_row_0/A:col10/1734043004386/Put/seqid=0 2024-12-12T22:36:45,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742259_1435 (size=12151) 2024-12-12T22:36:45,982 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), 
to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/613d3c48347a4fde989190b2ab6b7ec5 2024-12-12T22:36:46,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6693de75d9bb4e94a6017b23ecd226b3 is 50, key is test_row_0/B:col10/1734043004386/Put/seqid=0 2024-12-12T22:36:46,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742260_1436 (size=12151) 2024-12-12T22:36:46,053 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6693de75d9bb4e94a6017b23ecd226b3 2024-12-12T22:36:46,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/37e4c48d7e304fd28024c96f45eecfdc is 50, key is test_row_0/C:col10/1734043004386/Put/seqid=0 2024-12-12T22:36:46,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742261_1437 (size=12151) 2024-12-12T22:36:46,101 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/37e4c48d7e304fd28024c96f45eecfdc 2024-12-12T22:36:46,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:46,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/613d3c48347a4fde989190b2ab6b7ec5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5 2024-12-12T22:36:46,124 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5, entries=150, sequenceid=158, filesize=11.9 K 2024-12-12T22:36:46,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6693de75d9bb4e94a6017b23ecd226b3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3 2024-12-12T22:36:46,131 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3, entries=150, sequenceid=158, filesize=11.9 K 2024-12-12T22:36:46,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/37e4c48d7e304fd28024c96f45eecfdc as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc 2024-12-12T22:36:46,138 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc, entries=150, sequenceid=158, filesize=11.9 K 2024-12-12T22:36:46,144 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for ead55ce3707e32db5ec1e629ea38c388 in 198ms, sequenceid=158, compaction requested=true 2024-12-12T22:36:46,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:46,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:46,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-12T22:36:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-12T22:36:46,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-12T22:36:46,153 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1440 sec 2024-12-12T22:36:46,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.1560 sec 2024-12-12T22:36:46,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:46,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:36:46,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:46,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:46,594 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:46,595 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:46,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/0b88ef9a3b6e4629a37f25b31806399c is 50, key is test_row_0/A:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:46,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742262_1438 (size=12151) 2024-12-12T22:36:46,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/0b88ef9a3b6e4629a37f25b31806399c 2024-12-12T22:36:46,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043066667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043066671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043066672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043066679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043066676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/331f01f784394f94b6e66ce6c118638f is 50, key is test_row_0/B:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:46,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742263_1439 (size=12151) 2024-12-12T22:36:46,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/331f01f784394f94b6e66ce6c118638f 2024-12-12T22:36:46,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043066783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043066790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043066790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,811 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043066797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,817 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043066809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:46,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/393472af807c4a329b5ccba9e91cf8e3 is 50, key is test_row_0/C:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:46,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742264_1440 (size=12151) 2024-12-12T22:36:46,993 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:46,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043066993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043066995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043066995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043067019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043067024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/393472af807c4a329b5ccba9e91cf8e3 2024-12-12T22:36:47,305 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043067301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043067305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043067311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043067328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/0b88ef9a3b6e4629a37f25b31806399c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c 2024-12-12T22:36:47,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043067346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T22:36:47,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/331f01f784394f94b6e66ce6c118638f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f 2024-12-12T22:36:47,424 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T22:36:47,431 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/393472af807c4a329b5ccba9e91cf8e3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3 2024-12-12T22:36:47,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3, entries=150, sequenceid=171, filesize=11.9 K 2024-12-12T22:36:47,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ead55ce3707e32db5ec1e629ea38c388 in 870ms, sequenceid=171, compaction requested=true 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:47,461 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:47,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:36:47,461 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:47,463 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:47,467 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:47,468 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:47,468 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:47,468 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/db773ab7db45486da2e690a3cd3697aa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.8 K 2024-12-12T22:36:47,479 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:47,479 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:47,479 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:47,480 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f4287dcd7b7b444ca82ef07b18dc9af2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=47.5 K 2024-12-12T22:36:47,480 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting db773ab7db45486da2e690a3cd3697aa, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734043003943 2024-12-12T22:36:47,480 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4287dcd7b7b444ca82ef07b18dc9af2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734043003262 2024-12-12T22:36:47,480 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 613d3c48347a4fde989190b2ab6b7ec5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1734043004382 2024-12-12T22:36:47,481 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b88ef9a3b6e4629a37f25b31806399c, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:47,481 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11d3d554e49c4307857965bf95736a9d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734043003943 2024-12-12T22:36:47,481 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37e4c48d7e304fd28024c96f45eecfdc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1734043004382 2024-12-12T22:36:47,483 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 393472af807c4a329b5ccba9e91cf8e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:47,506 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#368 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:47,506 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/8c8fa76114284951aa3c2718db44cfe7 is 50, key is test_row_0/A:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:47,509 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:47,510 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/beab640e71014bc487a14c04eef22888 is 50, key is test_row_0/C:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:47,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742266_1442 (size=12527) 2024-12-12T22:36:47,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742265_1441 (size=12561) 2024-12-12T22:36:47,590 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/beab640e71014bc487a14c04eef22888 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/beab640e71014bc487a14c04eef22888 2024-12-12T22:36:47,612 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into beab640e71014bc487a14c04eef22888(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:47,612 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:47,612 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=12, startTime=1734043007461; duration=0sec 2024-12-12T22:36:47,613 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:47,613 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:47,613 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:47,621 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:47,621 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:47,621 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:47,621 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/048a5ac0db134acf9f19b208b80fd2f0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=35.8 K 2024-12-12T22:36:47,624 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 048a5ac0db134acf9f19b208b80fd2f0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1734043003943 2024-12-12T22:36:47,627 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6693de75d9bb4e94a6017b23ecd226b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1734043004382 2024-12-12T22:36:47,632 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 331f01f784394f94b6e66ce6c118638f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:47,707 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#370 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:47,711 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/b3f08bb7e38247d4baccde3b532c8c37 is 50, key is test_row_0/B:col10/1734043006585/Put/seqid=0 2024-12-12T22:36:47,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742267_1443 (size=12561) 2024-12-12T22:36:47,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:36:47,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:47,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:47,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:47,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:47,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:47,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:47,827 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:47,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/76f2f01b8a4f44d89406d51b8a98f79f is 50, key is test_row_0/A:col10/1734043006663/Put/seqid=0 2024-12-12T22:36:47,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043067859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043067863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043067865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043067867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043067874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742268_1444 (size=14541) 2024-12-12T22:36:47,984 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043067977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043067977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043067981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:47,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:47,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043067987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043067994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,018 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/8c8fa76114284951aa3c2718db44cfe7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8c8fa76114284951aa3c2718db44cfe7 2024-12-12T22:36:48,045 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 8c8fa76114284951aa3c2718db44cfe7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:48,045 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:48,045 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043007461; duration=0sec 2024-12-12T22:36:48,045 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:48,045 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:48,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-12T22:36:48,116 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-12T22:36:48,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:48,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-12T22:36:48,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:48,139 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:48,145 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:48,146 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:48,165 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/b3f08bb7e38247d4baccde3b532c8c37 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/b3f08bb7e38247d4baccde3b532c8c37 2024-12-12T22:36:48,177 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into b3f08bb7e38247d4baccde3b532c8c37(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:48,177 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:48,177 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043007461; duration=0sec 2024-12-12T22:36:48,177 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:48,177 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043068194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043068194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043068199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043068202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043068205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:48,303 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/76f2f01b8a4f44d89406d51b8a98f79f 2024-12-12T22:36:48,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:48,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:48,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,304 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/c7cf33604cc647278affa42b246da8fe is 50, key is test_row_0/B:col10/1734043006663/Put/seqid=0 2024-12-12T22:36:48,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742269_1445 (size=12151) 2024-12-12T22:36:48,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/c7cf33604cc647278affa42b246da8fe 2024-12-12T22:36:48,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/ff295e00ab36442cac81440a3f08ab4c is 50, key is test_row_0/C:col10/1734043006663/Put/seqid=0 2024-12-12T22:36:48,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:48,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742270_1446 (size=12151) 2024-12-12T22:36:48,459 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:48,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:48,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:48,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043068499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043068499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043068508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043068512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043068519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:48,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,791 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/ff295e00ab36442cac81440a3f08ab4c 2024-12-12T22:36:48,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/76f2f01b8a4f44d89406d51b8a98f79f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f 2024-12-12T22:36:48,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f, entries=200, sequenceid=198, filesize=14.2 K 2024-12-12T22:36:48,903 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/c7cf33604cc647278affa42b246da8fe as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe 2024-12-12T22:36:48,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe, entries=150, 
sequenceid=198, filesize=11.9 K 2024-12-12T22:36:48,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/ff295e00ab36442cac81440a3f08ab4c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c 2024-12-12T22:36:48,946 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:48,946 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:48,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c, entries=150, sequenceid=198, filesize=11.9 K 2024-12-12T22:36:48,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:48,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:48,947 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:48,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:48,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for ead55ce3707e32db5ec1e629ea38c388 in 1123ms, sequenceid=198, compaction requested=false 2024-12-12T22:36:48,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:48,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:36:49,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:49,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:49,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:49,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:49,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:49,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:49,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:49,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/4c58ca1c6b394516b50ee11d00c291fa is 50, key is test_row_0/A:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:49,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:49,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:49,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742271_1447 (size=14541) 2024-12-12T22:36:49,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043069111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043069115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043069116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043069123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043069127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043069229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043069229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:49,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043069234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043069237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043069251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,262 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:49,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,268 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,432 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:49,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043069431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043069431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043069448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043069448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043069459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,515 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/4c58ca1c6b394516b50ee11d00c291fa 2024-12-12T22:36:49,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a98795cb3b75472c823c12d7b72bbaaa is 50, key is test_row_0/B:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:49,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
as already flushing 2024-12-12T22:36:49,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742272_1448 (size=12151) 2024-12-12T22:36:49,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a98795cb3b75472c823c12d7b72bbaaa 2024-12-12T22:36:49,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/79230819d7a6469b8816a7c6d78dbf56 is 50, key is test_row_0/C:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742273_1449 (size=12151) 2024-12-12T22:36:49,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043069741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043069741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,755 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043069760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043069763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:49,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043069768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,925 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:49,925 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:49,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
as already flushing 2024-12-12T22:36:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:49,926 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,078 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,079 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,099 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/79230819d7a6469b8816a7c6d78dbf56 2024-12-12T22:36:50,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/4c58ca1c6b394516b50ee11d00c291fa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa 2024-12-12T22:36:50,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa, entries=200, sequenceid=212, filesize=14.2 K 2024-12-12T22:36:50,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/a98795cb3b75472c823c12d7b72bbaaa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa 2024-12-12T22:36:50,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa, entries=150, 
sequenceid=212, filesize=11.9 K 2024-12-12T22:36:50,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:50,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/79230819d7a6469b8816a7c6d78dbf56 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56 2024-12-12T22:36:50,247 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,248 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043070252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043070252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,263 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56, entries=150, sequenceid=212, filesize=11.9 K 2024-12-12T22:36:50,264 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ead55ce3707e32db5ec1e629ea38c388 in 1238ms, sequenceid=212, compaction requested=true 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:50,264 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:50,264 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:50,264 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:50,266 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:50,266 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:50,266 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,266 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/b3f08bb7e38247d4baccde3b532c8c37, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.0 K 2024-12-12T22:36:50,270 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41643 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:50,270 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:50,270 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:50,270 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8c8fa76114284951aa3c2718db44cfe7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=40.7 K 2024-12-12T22:36:50,271 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b3f08bb7e38247d4baccde3b532c8c37, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:50,271 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c8fa76114284951aa3c2718db44cfe7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:50,271 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c7cf33604cc647278affa42b246da8fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734043006663 2024-12-12T22:36:50,272 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 76f2f01b8a4f44d89406d51b8a98f79f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734043006663 2024-12-12T22:36:50,272 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a98795cb3b75472c823c12d7b72bbaaa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007861 2024-12-12T22:36:50,274 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c58ca1c6b394516b50ee11d00c291fa, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007856 2024-12-12T22:36:50,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:50,279 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 
2024-12-12T22:36:50,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:50,295 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#377 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:50,296 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/d811b6bb9404427e9abe25fb3f21d210 is 50, key is test_row_0/B:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:50,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6b4f905407c14070b540c82e4f675637 is 50, key is test_row_0/A:col10/1734043010279/Put/seqid=0 2024-12-12T22:36:50,309 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:50,310 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/d8d2e9c3f4474952af53ee623d148a5b is 50, key is test_row_0/A:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:50,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043070305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043070307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043070308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742275_1451 (size=14541) 2024-12-12T22:36:50,400 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6b4f905407c14070b540c82e4f675637 2024-12-12T22:36:50,403 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,404 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742274_1450 (size=12663) 2024-12-12T22:36:50,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742276_1452 (size=12663) 2024-12-12T22:36:50,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043070416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,428 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043070422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043070423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,446 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5a3f9eab3d7d49c3ab25d309e93d9b18 is 50, key is test_row_0/B:col10/1734043010279/Put/seqid=0 2024-12-12T22:36:50,448 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/d8d2e9c3f4474952af53ee623d148a5b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/d8d2e9c3f4474952af53ee623d148a5b 2024-12-12T22:36:50,458 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/d811b6bb9404427e9abe25fb3f21d210 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d811b6bb9404427e9abe25fb3f21d210 2024-12-12T22:36:50,461 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into d8d2e9c3f4474952af53ee623d148a5b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:50,461 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:50,461 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043010264; duration=0sec 2024-12-12T22:36:50,462 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:50,462 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:50,462 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:50,480 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:50,480 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:50,480 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,480 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/beab640e71014bc487a14c04eef22888, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.0 K 2024-12-12T22:36:50,480 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting beab640e71014bc487a14c04eef22888, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734043006578 2024-12-12T22:36:50,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742277_1453 (size=12151) 2024-12-12T22:36:50,487 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into d811b6bb9404427e9abe25fb3f21d210(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:50,487 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:50,487 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043010264; duration=0sec 2024-12-12T22:36:50,487 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:50,487 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:50,488 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff295e00ab36442cac81440a3f08ab4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1734043006663 2024-12-12T22:36:50,490 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79230819d7a6469b8816a7c6d78dbf56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007861 2024-12-12T22:36:50,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5a3f9eab3d7d49c3ab25d309e93d9b18 2024-12-12T22:36:50,520 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#381 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:50,521 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/ffbf199fab1142b1b3635da70e642659 is 50, key is test_row_0/C:col10/1734043007861/Put/seqid=0 2024-12-12T22:36:50,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c38969da12d14a558d239f5dee6b7a0c is 50, key is test_row_0/C:col10/1734043010279/Put/seqid=0 2024-12-12T22:36:50,565 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:50,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,566 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742278_1454 (size=12629) 2024-12-12T22:36:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:50,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742279_1455 (size=12151) 2024-12-12T22:36:50,600 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/ffbf199fab1142b1b3635da70e642659 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ffbf199fab1142b1b3635da70e642659 2024-12-12T22:36:50,630 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into ffbf199fab1142b1b3635da70e642659(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:50,630 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:50,630 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=13, startTime=1734043010264; duration=0sec 2024-12-12T22:36:50,630 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:50,630 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:50,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043070632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043070632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043070636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,723 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:50,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:50,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:50,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:50,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043070948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043070950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,959 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:50,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043070957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:50,988 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c38969da12d14a558d239f5dee6b7a0c 2024-12-12T22:36:51,011 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6b4f905407c14070b540c82e4f675637 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637 2024-12-12T22:36:51,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,033 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:51,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:51,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:51,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:51,033 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:51,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:51,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:51,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637, entries=200, sequenceid=238, filesize=14.2 K 2024-12-12T22:36:51,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/5a3f9eab3d7d49c3ab25d309e93d9b18 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18 2024-12-12T22:36:51,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18, entries=150, sequenceid=238, filesize=11.9 K 2024-12-12T22:36:51,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c38969da12d14a558d239f5dee6b7a0c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c 2024-12-12T22:36:51,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c, entries=150, sequenceid=238, filesize=11.9 K 
2024-12-12T22:36:51,112 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ead55ce3707e32db5ec1e629ea38c388 in 833ms, sequenceid=238, compaction requested=false 2024-12-12T22:36:51,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:51,187 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:51,188 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:51,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e90da0b1590c4702a36f5b4f2b59f872 is 50, key is test_row_0/A:col10/1734043010301/Put/seqid=0 2024-12-12T22:36:51,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742280_1456 (size=9757) 2024-12-12T22:36:51,229 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), 
to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e90da0b1590c4702a36f5b4f2b59f872 2024-12-12T22:36:51,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/73f16732ec4648149d6239001abd2bb7 is 50, key is test_row_0/B:col10/1734043010301/Put/seqid=0 2024-12-12T22:36:51,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742281_1457 (size=9757) 2024-12-12T22:36:51,263 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/73f16732ec4648149d6239001abd2bb7 2024-12-12T22:36:51,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:51,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:51,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/3a55766c36bf4994ad22325153afa655 is 50, key is test_row_0/C:col10/1734043010301/Put/seqid=0 2024-12-12T22:36:51,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742282_1458 (size=9757) 2024-12-12T22:36:51,279 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/3a55766c36bf4994ad22325153afa655 2024-12-12T22:36:51,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/e90da0b1590c4702a36f5b4f2b59f872 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872 2024-12-12T22:36:51,296 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872, entries=100, sequenceid=251, filesize=9.5 K 2024-12-12T22:36:51,298 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/73f16732ec4648149d6239001abd2bb7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7 2024-12-12T22:36:51,304 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7, entries=100, sequenceid=251, filesize=9.5 K 2024-12-12T22:36:51,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/3a55766c36bf4994ad22325153afa655 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655 2024-12-12T22:36:51,309 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655, entries=100, sequenceid=251, filesize=9.5 K 2024-12-12T22:36:51,311 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=67.09 KB/68700 for ead55ce3707e32db5ec1e629ea38c388 in 123ms, sequenceid=251, compaction requested=true 2024-12-12T22:36:51,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:51,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:51,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-12T22:36:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-12T22:36:51,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:51,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:51,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-12T22:36:51,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1660 sec 2024-12-12T22:36:51,317 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 3.1930 sec 2024-12-12T22:36:51,319 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/06d875d8f537461aa607c4acda761675 is 50, key is test_row_0/A:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:51,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742283_1459 (size=14741) 2024-12-12T22:36:51,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043071385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043071391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043071455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043071467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043071469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043071499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043071501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043071710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:51,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043071713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:51,755 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/06d875d8f537461aa607c4acda761675 2024-12-12T22:36:51,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/3774fd1b9267437885f024ccf3cb5c63 is 50, key is test_row_0/B:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:51,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742284_1460 (size=12301) 2024-12-12T22:36:51,831 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/3774fd1b9267437885f024ccf3cb5c63 2024-12-12T22:36:51,873 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/51e6eee0c6074b3b96c4ac0a27838b1c is 50, key is test_row_0/C:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:51,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742285_1461 (size=12301) 2024-12-12T22:36:51,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=265 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/51e6eee0c6074b3b96c4ac0a27838b1c 2024-12-12T22:36:51,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/06d875d8f537461aa607c4acda761675 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675 2024-12-12T22:36:51,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675, entries=200, sequenceid=265, filesize=14.4 K 2024-12-12T22:36:51,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/3774fd1b9267437885f024ccf3cb5c63 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63 2024-12-12T22:36:51,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63, entries=150, sequenceid=265, filesize=12.0 K 2024-12-12T22:36:51,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/51e6eee0c6074b3b96c4ac0a27838b1c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c 2024-12-12T22:36:52,006 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c, entries=150, sequenceid=265, filesize=12.0 K 2024-12-12T22:36:52,010 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for ead55ce3707e32db5ec1e629ea38c388 in 698ms, sequenceid=265, compaction requested=true 2024-12-12T22:36:52,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,010 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:52,011 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:52,011 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:52,015 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:52,016 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:52,016 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,016 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/d8d2e9c3f4474952af53ee623d148a5b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=50.5 K 2024-12-12T22:36:52,017 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8d2e9c3f4474952af53ee623d148a5b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007861 2024-12-12T22:36:52,018 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b4f905407c14070b540c82e4f675637, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734043009045 2024-12-12T22:36:52,018 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e90da0b1590c4702a36f5b4f2b59f872, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734043010301 2024-12-12T22:36:52,020 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06d875d8f537461aa607c4acda761675, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011284 2024-12-12T22:36:52,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:52,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:52,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,027 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46872 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:52,027 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:52,027 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
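Note: the minor compactions being selected here, with ExploringCompactionPolicy picking 4 eligible files per store, are governed largely by the per-store compaction settings. A hedged sketch of tuning them programmatically; the values shown are illustrative only, not the ones this test uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 4);        // minimum files before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in one minor compaction
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by the exploring policy
    return conf;
  }
}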
2024-12-12T22:36:52,027 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d811b6bb9404427e9abe25fb3f21d210, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=45.8 K 2024-12-12T22:36:52,032 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d811b6bb9404427e9abe25fb3f21d210, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007861 2024-12-12T22:36:52,034 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a3f9eab3d7d49c3ab25d309e93d9b18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734043009045 2024-12-12T22:36:52,036 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 73f16732ec4648149d6239001abd2bb7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734043010301 2024-12-12T22:36:52,039 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3774fd1b9267437885f024ccf3cb5c63, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011286 2024-12-12T22:36:52,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/8d90cadceaf645efa73152fd5a65da52 is 50, key is test_row_0/A:col10/1734043011368/Put/seqid=0 2024-12-12T22:36:52,049 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:52,050 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/7d640098d3444807842db80a0d56bc9f is 50, key is test_row_0/A:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:52,065 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#391 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:52,066 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/833eb47bc75c45c58f9e2a690d3dc4e5 is 50, key is test_row_0/B:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:52,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742286_1462 (size=12301) 2024-12-12T22:36:52,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742288_1464 (size=12949) 2024-12-12T22:36:52,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742287_1463 (size=12949) 2024-12-12T22:36:52,094 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/7d640098d3444807842db80a0d56bc9f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7d640098d3444807842db80a0d56bc9f 2024-12-12T22:36:52,097 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 7d640098d3444807842db80a0d56bc9f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
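Note: store A has now been compacted from four files into one (7d640098d3444807842db80a0d56bc9f, 12.6 K). The same kind of compaction can also be requested explicitly per column family; a minimal, hedged sketch, assuming an open Admin handle as in the flush example above:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactFamilyExample {
  static void compactFamilyA(Admin admin) throws Exception {
    // Queues a (minor) compaction of column family A of the test table;
    // majorCompact(...) would force all store files to be rewritten instead.
    admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
  }
}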
2024-12-12T22:36:52,097 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,097 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=12, startTime=1734043012010; duration=0sec 2024-12-12T22:36:52,097 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:52,098 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:52,098 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:52,098 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46838 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:52,098 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:52,098 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,099 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ffbf199fab1142b1b3635da70e642659, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=45.7 K 2024-12-12T22:36:52,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffbf199fab1142b1b3635da70e642659, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734043007861 2024-12-12T22:36:52,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c38969da12d14a558d239f5dee6b7a0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734043009045 2024-12-12T22:36:52,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a55766c36bf4994ad22325153afa655, keycount=100, bloomtype=ROW, size=9.5 K, 
encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734043010301 2024-12-12T22:36:52,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043072092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,102 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51e6eee0c6074b3b96c4ac0a27838b1c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011286 2024-12-12T22:36:52,101 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043072092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,108 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#392 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:52,108 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f984bb104f5049e080166af16a81350b is 50, key is test_row_0/C:col10/1734043011311/Put/seqid=0 2024-12-12T22:36:52,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742289_1465 (size=12915) 2024-12-12T22:36:52,142 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f984bb104f5049e080166af16a81350b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f984bb104f5049e080166af16a81350b 2024-12-12T22:36:52,151 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into f984bb104f5049e080166af16a81350b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
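Note: the repeated RegionTooBusyException ("Over memstore limit=512.0 K") comes from HRegion.checkResources rejecting writes while the region's memstore is above its blocking size. That threshold is normally the region flush size multiplied by the block multiplier; the unusually small 512 K limit suggests this test lowers the flush size. A hedged sketch of the two settings involved, with illustrative values only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingConfig {
  public static Configuration sketch() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Block new writes once the memstore grows past flush.size * multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}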
2024-12-12T22:36:52,151 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,151 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=12, startTime=1734043012011; duration=0sec 2024-12-12T22:36:52,151 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:52,151 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:52,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043072208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043072211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-12T22:36:52,247 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-12T22:36:52,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:52,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-12-12T22:36:52,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T22:36:52,254 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:52,254 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:52,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T22:36:52,406 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-12T22:36:52,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:52,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:52,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
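Note: RegionTooBusyException is a plain IOException, and the stock HBase client normally retries it internally (the retry count and pause are typically controlled by hbase.client.retries.number and hbase.client.pause). For callers driving Table.put directly, a hedged sketch of explicit backoff; the table and family names mirror the log, everything else is assumed:

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  static void putWithBackoff(Connection conn, byte[] row) throws Exception {
    Put put = new Put(row)
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // The region's memstore is above its blocking limit; give the
          // in-flight flush time to drain it before trying again.
          if (attempt >= 5) throw e;
          Thread.sleep(100L * attempt);
        }
      }
    }
  }
}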
2024-12-12T22:36:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:52,421 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043072419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043072418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043072459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043072476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043072476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,482 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/8d90cadceaf645efa73152fd5a65da52 2024-12-12T22:36:52,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0e4033031cee4a16b871d8460014d801 is 50, key is test_row_0/B:col10/1734043011368/Put/seqid=0 2024-12-12T22:36:52,528 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/833eb47bc75c45c58f9e2a690d3dc4e5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/833eb47bc75c45c58f9e2a690d3dc4e5 2024-12-12T22:36:52,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742290_1466 (size=12301) 2024-12-12T22:36:52,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0e4033031cee4a16b871d8460014d801 2024-12-12T22:36:52,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T22:36:52,557 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 833eb47bc75c45c58f9e2a690d3dc4e5(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:52,557 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,557 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=12, startTime=1734043012011; duration=0sec 2024-12-12T22:36:52,557 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:52,557 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:52,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-12T22:36:52,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:52,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:52,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:52,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:52,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/7e438ed9a5894a3aa2c223745ecef843 is 50, key is test_row_0/C:col10/1734043011368/Put/seqid=0 2024-12-12T22:36:52,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742291_1467 (size=12301) 2024-12-12T22:36:52,607 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/7e438ed9a5894a3aa2c223745ecef843 2024-12-12T22:36:52,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/8d90cadceaf645efa73152fd5a65da52 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52 2024-12-12T22:36:52,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T22:36:52,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/0e4033031cee4a16b871d8460014d801 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801 2024-12-12T22:36:52,643 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T22:36:52,644 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/7e438ed9a5894a3aa2c223745ecef843 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843 2024-12-12T22:36:52,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843, entries=150, sequenceid=288, filesize=12.0 K 2024-12-12T22:36:52,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for ead55ce3707e32db5ec1e629ea38c388 in 625ms, sequenceid=288, compaction requested=false 2024-12-12T22:36:52,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,720 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,721 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,721 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:52,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:52,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:52,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
as already flushing 2024-12-12T22:36:52,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6ac6a8087cf84b8e9e3970142f61e102 is 50, key is test_row_0/A:col10/1734043012070/Put/seqid=0 2024-12-12T22:36:52,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742292_1468 (size=12301) 2024-12-12T22:36:52,748 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6ac6a8087cf84b8e9e3970142f61e102 2024-12-12T22:36:52,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/37688e61337442c4aae9e1a303c74986 is 50, key is test_row_0/B:col10/1734043012070/Put/seqid=0 2024-12-12T22:36:52,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043072763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043072764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742293_1469 (size=12301) 2024-12-12T22:36:52,807 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/37688e61337442c4aae9e1a303c74986 2024-12-12T22:36:52,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/54480341c69449eabd49d1a227a34bc4 is 50, key is test_row_0/C:col10/1734043012070/Put/seqid=0 2024-12-12T22:36:52,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T22:36:52,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742294_1470 (size=12301) 2024-12-12T22:36:52,869 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043072865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:52,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043072869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:52,877 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/54480341c69449eabd49d1a227a34bc4 2024-12-12T22:36:52,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/6ac6a8087cf84b8e9e3970142f61e102 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102 2024-12-12T22:36:52,913 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T22:36:52,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/37688e61337442c4aae9e1a303c74986 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986 2024-12-12T22:36:52,918 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T22:36:52,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/54480341c69449eabd49d1a227a34bc4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4 2024-12-12T22:36:52,927 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4, entries=150, sequenceid=304, filesize=12.0 K 2024-12-12T22:36:52,928 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for ead55ce3707e32db5ec1e629ea38c388 in 207ms, sequenceid=304, compaction requested=true 2024-12-12T22:36:52,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:52,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:52,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-12T22:36:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-12T22:36:52,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-12T22:36:52,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 675 msec 2024-12-12T22:36:52,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 679 msec 2024-12-12T22:36:53,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:53,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:53,088 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/30bc362075f04748a14ba9dfe498f860 is 50, key is test_row_0/A:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:53,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043073106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043073110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742295_1471 (size=14741) 2024-12-12T22:36:53,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043073210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043073223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-12T22:36:53,365 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-12T22:36:53,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:53,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-12T22:36:53,377 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:53,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:53,378 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:53,378 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:53,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043073416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,437 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043073431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:53,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/30bc362075f04748a14ba9dfe498f860 2024-12-12T22:36:53,531 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:53,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:53,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/288a2682570e42c29a8294b2ed00b619 is 50, key is test_row_0/B:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:53,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742296_1472 (size=12301) 2024-12-12T22:36:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:53,694 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:53,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043073722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043073747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:53,859 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:53,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:53,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:53,859 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:53,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:53,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:53,994 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/288a2682570e42c29a8294b2ed00b619 2024-12-12T22:36:54,011 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/93b2ed5052674bec855c11cf78e62098 is 50, key is test_row_0/C:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:54,012 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:54,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:54,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:54,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742297_1473 (size=12301) 2024-12-12T22:36:54,167 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,168 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:54,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:54,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043074241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:54,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043074263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,324 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:54,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:54,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:54,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/93b2ed5052674bec855c11cf78e62098 2024-12-12T22:36:54,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:54,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043074468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,475 DEBUG [Thread-1836 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4167 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:54,479 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:54,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/30bc362075f04748a14ba9dfe498f860 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860 2024-12-12T22:36:54,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:54,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:54,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,484 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:54,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:54,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043074496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,504 DEBUG [Thread-1832 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:54,505 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:54,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043074498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,507 DEBUG [Thread-1834 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:54,512 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860, entries=200, sequenceid=329, filesize=14.4 K 2024-12-12T22:36:54,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/288a2682570e42c29a8294b2ed00b619 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619 2024-12-12T22:36:54,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619, entries=150, sequenceid=329, filesize=12.0 K 2024-12-12T22:36:54,530 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/93b2ed5052674bec855c11cf78e62098 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098 2024-12-12T22:36:54,554 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098, entries=150, sequenceid=329, filesize=12.0 K 2024-12-12T22:36:54,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ead55ce3707e32db5ec1e629ea38c388 in 1482ms, sequenceid=329, compaction requested=true 2024-12-12T22:36:54,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:54,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:54,558 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:54,559 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:54,560 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:54,560 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:54,560 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,560 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/833eb47bc75c45c58f9e2a690d3dc4e5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=48.7 K 2024-12-12T22:36:54,560 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52292 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:54,561 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:54,561 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
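The SortedCompactionPolicy/ExploringCompactionPolicy lines above show a minor compaction being selected from 4 eligible store files per column family, with 16 files as the blocking threshold. As an illustrative sketch only (these are the standard HBase configuration keys shown with their usual defaults, not the values this particular test ran with), selection behaviour of this kind is driven by settings such as the following:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is considered.
            conf.setInt("hbase.hstore.compactionThreshold", 3);
            // Upper bound on how many files may be selected into one compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Once a store holds this many files, writes to the region may be delayed until
            // compaction reduces the file count; matches the "16 blocking" figure reported above.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            System.out.println(conf.getInt("hbase.hstore.blockingStoreFiles", -1));
        }
    }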
2024-12-12T22:36:54,561 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7d640098d3444807842db80a0d56bc9f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=51.1 K 2024-12-12T22:36:54,561 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 833eb47bc75c45c58f9e2a690d3dc4e5, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011286 2024-12-12T22:36:54,561 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d640098d3444807842db80a0d56bc9f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011286 2024-12-12T22:36:54,561 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e4033031cee4a16b871d8460014d801, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734043011368 2024-12-12T22:36:54,562 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 37688e61337442c4aae9e1a303c74986, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734043012059 2024-12-12T22:36:54,562 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8d90cadceaf645efa73152fd5a65da52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734043011368 2024-12-12T22:36:54,567 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 288a2682570e42c29a8294b2ed00b619, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012761 2024-12-12T22:36:54,567 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ac6a8087cf84b8e9e3970142f61e102, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1734043012059 2024-12-12T22:36:54,569 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30bc362075f04748a14ba9dfe498f860, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012753 2024-12-12T22:36:54,593 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:54,594 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/024c077f847f4dc9a3f696fdad6d3c6c is 50, key is test_row_0/B:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:54,601 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:54,602 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/f5aa53c3ffa748d185dae93aa588b331 is 50, key is test_row_0/A:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:54,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742298_1474 (size=13085) 2024-12-12T22:36:54,617 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/024c077f847f4dc9a3f696fdad6d3c6c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/024c077f847f4dc9a3f696fdad6d3c6c 2024-12-12T22:36:54,624 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 024c077f847f4dc9a3f696fdad6d3c6c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
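At this point store B has been compacted: four files totalling 48.7 K were rewritten into a single 12.8 K file. For context, the same kind of compaction can be requested and observed from a client through the HBase 2.x Admin API; a minimal sketch, assuming a reachable cluster and reusing the table name from this log (the polling loop and class name are illustrative, not part of the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.compact(table);   // ask the region servers for a minor compaction of every region
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(100);  // poll until the servers report that no compaction is running
                }
            }
        }
    }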
2024-12-12T22:36:54,624 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:54,625 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=12, startTime=1734043014558; duration=0sec 2024-12-12T22:36:54,625 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:54,625 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:54,625 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:36:54,626 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:36:54,626 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:54,627 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,627 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f984bb104f5049e080166af16a81350b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=48.7 K 2024-12-12T22:36:54,627 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f984bb104f5049e080166af16a81350b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=265, earliestPutTs=1734043011286 2024-12-12T22:36:54,627 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e438ed9a5894a3aa2c223745ecef843, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1734043011368 2024-12-12T22:36:54,628 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 54480341c69449eabd49d1a227a34bc4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=304, earliestPutTs=1734043012059 2024-12-12T22:36:54,628 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 93b2ed5052674bec855c11cf78e62098, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012761 2024-12-12T22:36:54,641 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:54,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:54,642 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:54,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:54,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742299_1475 (size=13085) 2024-12-12T22:36:54,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/83c9ab4df3344a3f8504ca9cdd1b3748 is 50, key is test_row_0/A:col10/1734043013104/Put/seqid=0 2024-12-12T22:36:54,665 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#404 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:54,666 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c4b961d052944ac1aa58e14a3139b740 is 50, key is test_row_0/C:col10/1734043012761/Put/seqid=0 2024-12-12T22:36:54,674 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/f5aa53c3ffa748d185dae93aa588b331 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/f5aa53c3ffa748d185dae93aa588b331 2024-12-12T22:36:54,703 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into f5aa53c3ffa748d185dae93aa588b331(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:54,703 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:54,703 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=12, startTime=1734043014557; duration=0sec 2024-12-12T22:36:54,703 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:54,703 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:54,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742300_1476 (size=12301) 2024-12-12T22:36:54,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742301_1477 (size=13051) 2024-12-12T22:36:54,744 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c4b961d052944ac1aa58e14a3139b740 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c4b961d052944ac1aa58e14a3139b740 2024-12-12T22:36:54,763 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into c4b961d052944ac1aa58e14a3139b740(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
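All three stores (A, B, C) of region ead55ce3707e32db5ec1e629ea38c388 have now been compacted, while the client writer threads keep being pushed back with RegionTooBusyException because the region is over its 512.0 K memstore blocking limit; the stack traces show those writers going through HTable.put (AcidGuaranteesTestTool$AtomicityWriter). A minimal client-side sketch of such a write, assuming a reachable cluster; the value bytes and class name are illustrative, while the table, row, families and qualifier are taken from this log:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] value = Bytes.toBytes("value");
                // One cell per column family, mirroring the test_row_0/A:col10 keys seen above.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                try {
                    table.put(put);
                } catch (IOException e) {
                    // If the region stays over its memstore limit until the client's retry budget
                    // (hbase.client.retries.number) runs out, this typically surfaces as a
                    // RetriesExhaustedException with RegionTooBusyException in its cause chain.
                }
            }
        }
    }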
2024-12-12T22:36:54,763 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:54,763 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=12, startTime=1734043014558; duration=0sec 2024-12-12T22:36:54,763 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:54,763 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:55,107 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/83c9ab4df3344a3f8504ca9cdd1b3748 2024-12-12T22:36:55,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6e4c0370ce9a45e3a83ea4350e99ca0c is 50, key is test_row_0/B:col10/1734043013104/Put/seqid=0 2024-12-12T22:36:55,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742302_1478 (size=12301) 2024-12-12T22:36:55,140 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6e4c0370ce9a45e3a83ea4350e99ca0c 2024-12-12T22:36:55,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/38dde18c2d5e49eb990fab848d52e44a is 50, key is test_row_0/C:col10/1734043013104/Put/seqid=0 2024-12-12T22:36:55,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742303_1479 (size=12301) 2024-12-12T22:36:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:55,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:55,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043075365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043075366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043075469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043075469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:55,571 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/38dde18c2d5e49eb990fab848d52e44a 2024-12-12T22:36:55,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/83c9ab4df3344a3f8504ca9cdd1b3748 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748 2024-12-12T22:36:55,619 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748, entries=150, sequenceid=341, filesize=12.0 K 2024-12-12T22:36:55,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/6e4c0370ce9a45e3a83ea4350e99ca0c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c 2024-12-12T22:36:55,656 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c, entries=150, sequenceid=341, filesize=12.0 K 2024-12-12T22:36:55,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/38dde18c2d5e49eb990fab848d52e44a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a 2024-12-12T22:36:55,673 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a, entries=150, sequenceid=341, filesize=12.0 K 2024-12-12T22:36:55,674 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for ead55ce3707e32db5ec1e629ea38c388 in 1032ms, sequenceid=341, compaction requested=false 2024-12-12T22:36:55,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:55,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:55,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-12T22:36:55,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-12T22:36:55,680 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:55,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:55,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:55,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/462b1ca3f1f148218d683ef5b92950da is 50, key is test_row_0/A:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:55,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-12T22:36:55,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3090 sec 2024-12-12T22:36:55,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043075702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043075704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.3270 sec 2024-12-12T22:36:55,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742304_1480 (size=14741) 2024-12-12T22:36:55,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/462b1ca3f1f148218d683ef5b92950da 2024-12-12T22:36:55,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/be04185007374763a89161b3fc546883 is 50, key is test_row_0/B:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:55,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043075812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:55,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043075816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:55,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742305_1481 (size=12301) 2024-12-12T22:36:55,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/be04185007374763a89161b3fc546883 2024-12-12T22:36:55,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/e210a14f61b54e928336f4f7a7522a8e is 50, key is test_row_0/C:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:55,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742306_1482 (size=12301) 2024-12-12T22:36:55,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/e210a14f61b54e928336f4f7a7522a8e 2024-12-12T22:36:55,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/462b1ca3f1f148218d683ef5b92950da as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da 2024-12-12T22:36:56,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043076018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da, entries=200, sequenceid=370, filesize=14.4 K 2024-12-12T22:36:56,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/be04185007374763a89161b3fc546883 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883 2024-12-12T22:36:56,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043076031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883, entries=150, sequenceid=370, filesize=12.0 K 2024-12-12T22:36:56,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/e210a14f61b54e928336f4f7a7522a8e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e 2024-12-12T22:36:56,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e, entries=150, sequenceid=370, filesize=12.0 K 2024-12-12T22:36:56,096 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-12T22:36:56,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for ead55ce3707e32db5ec1e629ea38c388 in 418ms, sequenceid=370, compaction requested=true 2024-12-12T22:36:56,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:56,098 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:56,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:56,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:56,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:56,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:56,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:56,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:56,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:56,103 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:56,104 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:56,104 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:56,104 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/f5aa53c3ffa748d185dae93aa588b331, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=39.2 K 2024-12-12T22:36:56,105 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5aa53c3ffa748d185dae93aa588b331, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012761 2024-12-12T22:36:56,106 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:56,106 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:56,106 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:56,106 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/024c077f847f4dc9a3f696fdad6d3c6c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.8 K 2024-12-12T22:36:56,106 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83c9ab4df3344a3f8504ca9cdd1b3748, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734043013097 2024-12-12T22:36:56,107 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 024c077f847f4dc9a3f696fdad6d3c6c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012761 2024-12-12T22:36:56,107 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 462b1ca3f1f148218d683ef5b92950da, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:56,112 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e4c0370ce9a45e3a83ea4350e99ca0c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734043013097 2024-12-12T22:36:56,116 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting be04185007374763a89161b3fc546883, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:56,130 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#410 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:56,130 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3bd9375b4ff94dcba2986f0cd82eeaa5 is 50, key is test_row_0/A:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:56,139 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#411 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:56,143 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/f7abfa3712af4d99940dd2973efbae4d is 50, key is test_row_0/B:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:56,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742307_1483 (size=13187) 2024-12-12T22:36:56,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742308_1484 (size=13187) 2024-12-12T22:36:56,200 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3bd9375b4ff94dcba2986f0cd82eeaa5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bd9375b4ff94dcba2986f0cd82eeaa5 2024-12-12T22:36:56,208 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/f7abfa3712af4d99940dd2973efbae4d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/f7abfa3712af4d99940dd2973efbae4d 2024-12-12T22:36:56,215 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 3bd9375b4ff94dcba2986f0cd82eeaa5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:56,215 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:56,215 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043016098; duration=0sec 2024-12-12T22:36:56,216 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:56,216 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:56,216 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:56,225 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into f7abfa3712af4d99940dd2973efbae4d(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:56,225 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:56,225 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043016098; duration=0sec 2024-12-12T22:36:56,225 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:56,225 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:56,227 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:56,227 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:56,227 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:56,227 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c4b961d052944ac1aa58e14a3139b740, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.8 K 2024-12-12T22:36:56,228 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4b961d052944ac1aa58e14a3139b740, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=329, earliestPutTs=1734043012761 2024-12-12T22:36:56,234 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38dde18c2d5e49eb990fab848d52e44a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734043013097 2024-12-12T22:36:56,243 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e210a14f61b54e928336f4f7a7522a8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:56,313 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:56,313 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/964df1ea72d443ae9ba30bdf6ce4fbe6 is 50, key is test_row_0/C:col10/1734043015361/Put/seqid=0 2024-12-12T22:36:56,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:56,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:56,344 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:56,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3bfc1b1a2ab04c3295e3442128e42b31 is 50, key is test_row_0/A:col10/1734043016330/Put/seqid=0 2024-12-12T22:36:56,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742309_1485 (size=13153) 2024-12-12T22:36:56,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742310_1486 (size=14741) 2024-12-12T22:36:56,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043076461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043076464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043076571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043076571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043076778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:56,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043076778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:56,794 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/964df1ea72d443ae9ba30bdf6ce4fbe6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/964df1ea72d443ae9ba30bdf6ce4fbe6 2024-12-12T22:36:56,802 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into 964df1ea72d443ae9ba30bdf6ce4fbe6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:56,802 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:56,802 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=13, startTime=1734043016099; duration=0sec 2024-12-12T22:36:56,802 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:56,802 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:56,821 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3bfc1b1a2ab04c3295e3442128e42b31 2024-12-12T22:36:56,838 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/28858bc77d054351b01449aa8b3bea04 is 50, key is test_row_0/B:col10/1734043016330/Put/seqid=0 2024-12-12T22:36:56,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742311_1487 (size=12301) 2024-12-12T22:36:57,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043077085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043077097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/28858bc77d054351b01449aa8b3bea04 2024-12-12T22:36:57,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/105514c76afb40f196e8b5285cad48f6 is 50, key is test_row_0/C:col10/1734043016330/Put/seqid=0 2024-12-12T22:36:57,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742312_1488 (size=12301) 2024-12-12T22:36:57,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/105514c76afb40f196e8b5285cad48f6 2024-12-12T22:36:57,344 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/3bfc1b1a2ab04c3295e3442128e42b31 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31 2024-12-12T22:36:57,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31, entries=200, sequenceid=384, filesize=14.4 K 2024-12-12T22:36:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/28858bc77d054351b01449aa8b3bea04 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04 2024-12-12T22:36:57,361 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04, entries=150, sequenceid=384, filesize=12.0 K 2024-12-12T22:36:57,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/105514c76afb40f196e8b5285cad48f6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6 2024-12-12T22:36:57,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6, entries=150, sequenceid=384, filesize=12.0 K 2024-12-12T22:36:57,389 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for ead55ce3707e32db5ec1e629ea38c388 in 1050ms, sequenceid=384, compaction requested=false 2024-12-12T22:36:57,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:57,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-12T22:36:57,499 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-12T22:36:57,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:36:57,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-12T22:36:57,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:57,523 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:36:57,531 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:36:57,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:36:57,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:36:57,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:57,604 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:57,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:57,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 is 50, key is test_row_0/A:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:57,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:57,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742313_1489 (size=14741) 2024-12-12T22:36:57,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043077677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043077678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,687 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:57,688 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:57,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:57,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:57,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043077791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043077788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:57,851 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:57,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:57,852 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:57,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:57,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:57,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043077994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,011 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,012 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:58,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043078007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:58,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 2024-12-12T22:36:58,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/e0c93cf343cb4f89980781baf095f022 is 50, key is test_row_0/B:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:58,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:58,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742314_1490 (size=12301) 2024-12-12T22:36:58,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/e0c93cf343cb4f89980781baf095f022 2024-12-12T22:36:58,177 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/b290939d640940379e888528e1742ffc is 50, key is test_row_0/C:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:58,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742315_1491 (size=12301) 2024-12-12T22:36:58,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043078300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043078320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,340 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,342 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34754 deadline: 1734043078494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,498 DEBUG [Thread-1836 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8191 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at 
org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:58,501 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,505 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:58,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34844 deadline: 1734043078529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34836 deadline: 1734043078534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,534 DEBUG [Thread-1834 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:58,535 DEBUG [Thread-1832 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8227 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., hostname=1aef280cf0a8,36025,1734042873576, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:36:58,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:58,662 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/b290939d640940379e888528e1742ffc 2024-12-12T22:36:58,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 2024-12-12T22:36:58,764 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5, entries=200, sequenceid=410, filesize=14.4 K 2024-12-12T22:36:58,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/e0c93cf343cb4f89980781baf095f022 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022 2024-12-12T22:36:58,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022, entries=150, sequenceid=410, filesize=12.0 K 2024-12-12T22:36:58,783 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/b290939d640940379e888528e1742ffc as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc 2024-12-12T22:36:58,801 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc, entries=150, sequenceid=410, filesize=12.0 K 2024-12-12T22:36:58,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for ead55ce3707e32db5ec1e629ea38c388 in 1203ms, sequenceid=410, compaction requested=true 2024-12-12T22:36:58,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:36:58,808 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:36:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:36:58,808 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:58,812 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42669 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:58,812 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:36:58,812 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:58,812 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bd9375b4ff94dcba2986f0cd82eeaa5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=41.7 K 2024-12-12T22:36:58,813 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:58,813 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:36:58,813 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,813 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/f7abfa3712af4d99940dd2973efbae4d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.9 K 2024-12-12T22:36:58,813 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bd9375b4ff94dcba2986f0cd82eeaa5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:58,815 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7abfa3712af4d99940dd2973efbae4d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:58,816 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bfc1b1a2ab04c3295e3442128e42b31, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043015698 2024-12-12T22:36:58,818 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28858bc77d054351b01449aa8b3bea04, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043016330 2024-12-12T22:36:58,818 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] 
compactions.Compactor(224): Compacting a1b9ca7e84d74140ad7e7da2bc42cbd5, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:36:58,820 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0c93cf343cb4f89980781baf095f022, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:36:58,824 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:36:58,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:58,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:58,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:58,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:58,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:58,825 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:58,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:58,827 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,848 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#419 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:58,849 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/550d2417e79c413ab3427dfaafb5c8ff is 50, key is test_row_0/A:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:58,850 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:58,851 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/54878681907d40beb5f10bdde2314dfe is 50, key is test_row_0/B:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:58,853 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/b6bf1cf3a600400cba5a99c011c1b5f7 is 50, key is test_row_0/A:col10/1734043017621/Put/seqid=0 2024-12-12T22:36:58,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742316_1492 (size=13289) 2024-12-12T22:36:58,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742318_1494 (size=13289) 2024-12-12T22:36:58,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742317_1493 (size=14741) 2024-12-12T22:36:58,987 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:58,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:58,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:58,991 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:58,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043078989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:58,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:58,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043078992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043079098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,112 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043079111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,147 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:59,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,293 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/b6bf1cf3a600400cba5a99c011c1b5f7 2024-12-12T22:36:59,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043079305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,313 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/550d2417e79c413ab3427dfaafb5c8ff as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/550d2417e79c413ab3427dfaafb5c8ff 2024-12-12T22:36:59,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:59,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,317 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/54878681907d40beb5f10bdde2314dfe as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/54878681907d40beb5f10bdde2314dfe 2024-12-12T22:36:59,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043079323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/bedfe09329b6461a97405c35592858d2 is 50, key is test_row_0/B:col10/1734043017621/Put/seqid=0 2024-12-12T22:36:59,343 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 550d2417e79c413ab3427dfaafb5c8ff(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:36:59,343 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:59,343 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043018808; duration=0sec 2024-12-12T22:36:59,344 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:36:59,344 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:36:59,344 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:36:59,345 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:36:59,345 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:36:59,345 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:59,345 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/964df1ea72d443ae9ba30bdf6ce4fbe6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=36.9 K 2024-12-12T22:36:59,347 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 964df1ea72d443ae9ba30bdf6ce4fbe6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=370, earliestPutTs=1734043015324 2024-12-12T22:36:59,351 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 105514c76afb40f196e8b5285cad48f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043016330 2024-12-12T22:36:59,353 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b290939d640940379e888528e1742ffc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:36:59,354 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 54878681907d40beb5f10bdde2314dfe(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:59,354 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:59,354 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043018808; duration=0sec 2024-12-12T22:36:59,354 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:59,354 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:36:59,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742319_1495 (size=12301) 2024-12-12T22:36:59,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/bedfe09329b6461a97405c35592858d2 2024-12-12T22:36:59,373 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:36:59,373 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/5684b2e471494a8184a28f458c511fe9 is 50, key is test_row_0/C:col10/1734043016415/Put/seqid=0 2024-12-12T22:36:59,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f44964eb6353417e9c1461ba3f3aab02 is 50, key is test_row_0/C:col10/1734043017621/Put/seqid=0 2024-12-12T22:36:59,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742320_1496 (size=13255) 2024-12-12T22:36:59,419 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/5684b2e471494a8184a28f458c511fe9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/5684b2e471494a8184a28f458c511fe9 2024-12-12T22:36:59,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into 5684b2e471494a8184a28f458c511fe9(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:36:59,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742321_1497 (size=12301) 2024-12-12T22:36:59,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:59,428 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=13, startTime=1734043018808; duration=0sec 2024-12-12T22:36:59,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:36:59,428 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:36:59,428 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f44964eb6353417e9c1461ba3f3aab02 2024-12-12T22:36:59,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/b6bf1cf3a600400cba5a99c011c1b5f7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7 2024-12-12T22:36:59,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7, entries=200, sequenceid=422, filesize=14.4 K 2024-12-12T22:36:59,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/bedfe09329b6461a97405c35592858d2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2 2024-12-12T22:36:59,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2, entries=150, sequenceid=422, filesize=12.0 K 2024-12-12T22:36:59,468 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f44964eb6353417e9c1461ba3f3aab02 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02 2024-12-12T22:36:59,474 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,479 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:59,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:59,482 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02, entries=150, sequenceid=422, filesize=12.0 K 2024-12-12T22:36:59,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for ead55ce3707e32db5ec1e629ea38c388 in 663ms, sequenceid=422, compaction requested=false 2024-12-12T22:36:59,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:36:59,632 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:36:59,632 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:36:59,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:36:59,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/c96c0ee0f56c4a1b8e748442a454808c is 50, key is test_row_0/A:col10/1734043018961/Put/seqid=0 2024-12-12T22:36:59,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:59,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:59,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:36:59,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043079660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043079662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742322_1498 (size=14741) 2024-12-12T22:36:59,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/c96c0ee0f56c4a1b8e748442a454808c 2024-12-12T22:36:59,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/03dfeae19600470abb0b17dafe73dc31 is 50, key is test_row_0/B:col10/1734043018961/Put/seqid=0 2024-12-12T22:36:59,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742323_1499 (size=12301) 2024-12-12T22:36:59,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043079772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043079772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,794 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:36:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:36:59,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:36:59,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:36:59,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
as already flushing 2024-12-12T22:36:59,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:36:59,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:36:59,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043079978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:36:59,990 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:36:59,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043079983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,117 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:37:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:37:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:37:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,118 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:00,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/03dfeae19600470abb0b17dafe73dc31 2024-12-12T22:37:00,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c83f0d4d6bdf474eb2b8d658adfddae8 is 50, key is test_row_0/C:col10/1734043018961/Put/seqid=0 2024-12-12T22:37:00,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742324_1500 (size=12301) 2024-12-12T22:37:00,273 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:37:00,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:37:00,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,274 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:00,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:00,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34762 deadline: 1734043080286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:00,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34794 deadline: 1734043080299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,426 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,427 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:37:00,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:37:00,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:00,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,583 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:37:00,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:37:00,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,584 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:00,616 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c83f0d4d6bdf474eb2b8d658adfddae8 2024-12-12T22:37:00,627 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/c96c0ee0f56c4a1b8e748442a454808c as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c 2024-12-12T22:37:00,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c, entries=200, sequenceid=451, filesize=14.4 K 2024-12-12T22:37:00,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/03dfeae19600470abb0b17dafe73dc31 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31 2024-12-12T22:37:00,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31, entries=150, sequenceid=451, filesize=12.0 K 2024-12-12T22:37:00,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/c83f0d4d6bdf474eb2b8d658adfddae8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8 2024-12-12T22:37:00,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8, entries=150, sequenceid=451, filesize=12.0 K 2024-12-12T22:37:00,665 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for ead55ce3707e32db5ec1e629ea38c388 in 1032ms, sequenceid=451, compaction requested=true 2024-12-12T22:37:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:00,665 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:00,666 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:00,666 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42771 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:00,666 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/A is initiating minor compaction (all files) 2024-12-12T22:37:00,666 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/A in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,667 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/550d2417e79c413ab3427dfaafb5c8ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=41.8 K 2024-12-12T22:37:00,667 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 550d2417e79c413ab3427dfaafb5c8ff, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:37:00,667 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6bf1cf3a600400cba5a99c011c1b5f7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1734043017621 2024-12-12T22:37:00,668 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c96c0ee0f56c4a1b8e748442a454808c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1734043018961 2024-12-12T22:37:00,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store ead55ce3707e32db5ec1e629ea38c388:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:00,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:00,671 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:00,671 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/B is initiating minor compaction (all files) 2024-12-12T22:37:00,671 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/B in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,671 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/54878681907d40beb5f10bdde2314dfe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=37.0 K 2024-12-12T22:37:00,673 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 54878681907d40beb5f10bdde2314dfe, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:37:00,677 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bedfe09329b6461a97405c35592858d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1734043017621 2024-12-12T22:37:00,678 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 03dfeae19600470abb0b17dafe73dc31, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1734043018961 2024-12-12T22:37:00,687 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#B#compaction#428 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:00,689 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/8c7f4c7ad2d64784b751f6313dd5ba86 is 50, key is test_row_0/B:col10/1734043018961/Put/seqid=0 2024-12-12T22:37:00,717 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#A#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:00,718 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/7649c4f9c1894b28802b1012854b0c08 is 50, key is test_row_0/A:col10/1734043018961/Put/seqid=0 2024-12-12T22:37:00,738 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:00,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742325_1501 (size=13391) 2024-12-12T22:37:00,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-12T22:37:00,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,739 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:37:00,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:00,743 DEBUG [Thread-1847 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x79d49886 to 127.0.0.1:50645 2024-12-12T22:37:00,743 DEBUG [Thread-1847 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,744 DEBUG [Thread-1843 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:50645 2024-12-12T22:37:00,744 DEBUG [Thread-1843 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,748 DEBUG [Thread-1845 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:50645 2024-12-12T22:37:00,748 DEBUG [Thread-1845 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,749 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/089554f353844ae286ecccc60aa2f2ca is 50, key is test_row_0/A:col10/1734043019659/Put/seqid=0 2024-12-12T22:37:00,752 DEBUG [Thread-1849 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x635b1751 to 127.0.0.1:50645 2024-12-12T22:37:00,752 DEBUG [Thread-1849 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,758 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/8c7f4c7ad2d64784b751f6313dd5ba86 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/8c7f4c7ad2d64784b751f6313dd5ba86 2024-12-12T22:37:00,760 DEBUG [Thread-1841 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:50645 2024-12-12T22:37:00,760 DEBUG [Thread-1841 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742327_1503 (size=12301) 2024-12-12T22:37:00,769 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/089554f353844ae286ecccc60aa2f2ca 2024-12-12T22:37:00,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742326_1502 (size=13391) 2024-12-12T22:37:00,777 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/B of ead55ce3707e32db5ec1e629ea38c388 into 8c7f4c7ad2d64784b751f6313dd5ba86(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:00,777 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:00,777 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/B, priority=13, startTime=1734043020665; duration=0sec 2024-12-12T22:37:00,777 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:00,777 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:B 2024-12-12T22:37:00,777 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:00,783 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:00,783 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): ead55ce3707e32db5ec1e629ea38c388/C is initiating minor compaction (all files) 2024-12-12T22:37:00,783 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of ead55ce3707e32db5ec1e629ea38c388/C in TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:00,784 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/5684b2e471494a8184a28f458c511fe9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp, totalSize=37.0 K 2024-12-12T22:37:00,788 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5684b2e471494a8184a28f458c511fe9, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1734043016415 2024-12-12T22:37:00,794 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/7649c4f9c1894b28802b1012854b0c08 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7649c4f9c1894b28802b1012854b0c08 2024-12-12T22:37:00,794 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting f44964eb6353417e9c1461ba3f3aab02, keycount=150, 
bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1734043017621 2024-12-12T22:37:00,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:37:00,797 DEBUG [Thread-1830 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:50645 2024-12-12T22:37:00,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. as already flushing 2024-12-12T22:37:00,797 DEBUG [Thread-1830 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,798 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c83f0d4d6bdf474eb2b8d658adfddae8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=451, earliestPutTs=1734043018961 2024-12-12T22:37:00,804 DEBUG [Thread-1838 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2070263a to 127.0.0.1:50645 2024-12-12T22:37:00,804 DEBUG [Thread-1838 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:00,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/fa884493759c431289509ed674231b89 is 50, key is test_row_0/B:col10/1734043019659/Put/seqid=0 2024-12-12T22:37:00,814 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/A of ead55ce3707e32db5ec1e629ea38c388 into 7649c4f9c1894b28802b1012854b0c08(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:00,814 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:00,814 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/A, priority=13, startTime=1734043020665; duration=0sec 2024-12-12T22:37:00,814 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:00,814 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:A 2024-12-12T22:37:00,829 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): ead55ce3707e32db5ec1e629ea38c388#C#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:00,830 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f43f986027a847ddb4d2cd7f0032310e is 50, key is test_row_0/C:col10/1734043018961/Put/seqid=0 2024-12-12T22:37:00,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742328_1504 (size=12301) 2024-12-12T22:37:00,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742329_1505 (size=13357) 2024-12-12T22:37:01,232 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-12T22:37:01,237 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/fa884493759c431289509ed674231b89 2024-12-12T22:37:01,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/9b771bdcb029478caec46b41e166c92b is 50, key is test_row_0/C:col10/1734043019659/Put/seqid=0 2024-12-12T22:37:01,244 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/f43f986027a847ddb4d2cd7f0032310e as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f43f986027a847ddb4d2cd7f0032310e 2024-12-12T22:37:01,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742330_1506 (size=12301) 2024-12-12T22:37:01,248 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in ead55ce3707e32db5ec1e629ea38c388/C of ead55ce3707e32db5ec1e629ea38c388 into f43f986027a847ddb4d2cd7f0032310e(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:01,248 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:01,248 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388., storeName=ead55ce3707e32db5ec1e629ea38c388/C, priority=13, startTime=1734043020665; duration=0sec 2024-12-12T22:37:01,248 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:01,248 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: ead55ce3707e32db5ec1e629ea38c388:C 2024-12-12T22:37:01,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:37:01,647 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=461 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/9b771bdcb029478caec46b41e166c92b 2024-12-12T22:37:01,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/089554f353844ae286ecccc60aa2f2ca as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/089554f353844ae286ecccc60aa2f2ca 2024-12-12T22:37:01,654 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/089554f353844ae286ecccc60aa2f2ca, entries=150, sequenceid=461, filesize=12.0 K 2024-12-12T22:37:01,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/fa884493759c431289509ed674231b89 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/fa884493759c431289509ed674231b89 2024-12-12T22:37:01,658 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/fa884493759c431289509ed674231b89, entries=150, sequenceid=461, filesize=12.0 K 2024-12-12T22:37:01,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/9b771bdcb029478caec46b41e166c92b as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/9b771bdcb029478caec46b41e166c92b 2024-12-12T22:37:01,662 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/9b771bdcb029478caec46b41e166c92b, entries=150, sequenceid=461, filesize=12.0 K 2024-12-12T22:37:01,663 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=13.42 KB/13740 for ead55ce3707e32db5ec1e629ea38c388 in 924ms, sequenceid=461, compaction requested=false 2024-12-12T22:37:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:01,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-12T22:37:01,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-12T22:37:01,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-12T22:37:01,665 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.1330 sec 2024-12-12T22:37:01,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 4.1550 sec 2024-12-12T22:37:05,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-12T22:37:05,645 INFO [Thread-1840 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-12T22:37:08,510 DEBUG [Thread-1836 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bc486e1 to 127.0.0.1:50645 2024-12-12T22:37:08,510 DEBUG [Thread-1836 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:08,556 DEBUG [Thread-1834 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:50645 2024-12-12T22:37:08,557 DEBUG [Thread-1834 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:08,601 DEBUG [Thread-1832 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:50645 2024-12-12T22:37:08,602 DEBUG [Thread-1832 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:08,602 INFO 
[Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 124 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 129 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1465 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4395 rows 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1459 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4377 rows 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1458 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4374 rows 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1459 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4377 rows 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1482 2024-12-12T22:37:08,602 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4446 rows 2024-12-12T22:37:08,602 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T22:37:08,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:50645 2024-12-12T22:37:08,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:08,604 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T22:37:08,604 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T22:37:08,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:08,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T22:37:08,607 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043028607"}]},"ts":"1734043028607"} 2024-12-12T22:37:08,608 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T22:37:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T22:37:08,872 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:37:08,873 INFO 
[PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:37:08,874 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, UNASSIGN}] 2024-12-12T22:37:08,875 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, UNASSIGN 2024-12-12T22:37:08,875 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=ead55ce3707e32db5ec1e629ea38c388, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:08,876 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:37:08,876 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:37:08,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T22:37:09,027 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:09,028 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing ead55ce3707e32db5ec1e629ea38c388, disabling compactions & flushes 2024-12-12T22:37:09,028 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. after waiting 0 ms 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:37:09,028 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing ead55ce3707e32db5ec1e629ea38c388 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=A 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=B 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK ead55ce3707e32db5ec1e629ea38c388, store=C 2024-12-12T22:37:09,028 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:09,032 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/899fbcc321fc484c802adad9549437a5 is 50, key is test_row_0/A:col10/1734043028555/Put/seqid=0 2024-12-12T22:37:09,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742331_1507 (size=12301) 2024-12-12T22:37:09,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T22:37:09,523 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/899fbcc321fc484c802adad9549437a5 2024-12-12T22:37:09,531 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/065c8ec2e2b34016b8c99c316403ed14 is 50, key is test_row_0/B:col10/1734043028555/Put/seqid=0 2024-12-12T22:37:09,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742332_1508 (size=12301) 2024-12-12T22:37:09,536 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=472 (bloomFilter=true), 
to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/065c8ec2e2b34016b8c99c316403ed14 2024-12-12T22:37:09,545 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/34b19a7f654942fc85de4d31b363c603 is 50, key is test_row_0/C:col10/1734043028555/Put/seqid=0 2024-12-12T22:37:09,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742333_1509 (size=12301) 2024-12-12T22:37:09,551 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/34b19a7f654942fc85de4d31b363c603 2024-12-12T22:37:09,562 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/A/899fbcc321fc484c802adad9549437a5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/899fbcc321fc484c802adad9549437a5 2024-12-12T22:37:09,566 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/899fbcc321fc484c802adad9549437a5, entries=150, sequenceid=472, filesize=12.0 K 2024-12-12T22:37:09,566 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/B/065c8ec2e2b34016b8c99c316403ed14 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/065c8ec2e2b34016b8c99c316403ed14 2024-12-12T22:37:09,569 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/065c8ec2e2b34016b8c99c316403ed14, entries=150, sequenceid=472, filesize=12.0 K 2024-12-12T22:37:09,570 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/.tmp/C/34b19a7f654942fc85de4d31b363c603 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/34b19a7f654942fc85de4d31b363c603 2024-12-12T22:37:09,573 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/34b19a7f654942fc85de4d31b363c603, entries=150, sequenceid=472, filesize=12.0 K 2024-12-12T22:37:09,574 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for ead55ce3707e32db5ec1e629ea38c388 in 545ms, sequenceid=472, compaction requested=true 2024-12-12T22:37:09,574 DEBUG [StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cd56c5067fc14891a46265f11a9b5eed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/349d932a2e7b4944bb0c613d298e43a5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/db773ab7db45486da2e690a3cd3697aa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8c8fa76114284951aa3c2718db44cfe7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/d8d2e9c3f4474952af53ee623d148a5b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7d640098d3444807842db80a0d56bc9f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/f5aa53c3ffa748d185dae93aa588b331, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bd9375b4ff94dcba2986f0cd82eeaa5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/550d2417e79c413ab3427dfaafb5c8ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c] to archive 2024-12-12T22:37:09,575 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:37:09,577 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/5f6a690bbf104d24930fe60a55e265b6 2024-12-12T22:37:09,577 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3ccd56fbf3cb4cff9acb43b087818e88 2024-12-12T22:37:09,578 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cd56c5067fc14891a46265f11a9b5eed to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cd56c5067fc14891a46265f11a9b5eed 2024-12-12T22:37:09,579 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/826b28d8b73f437a9369c25955f94e1d 2024-12-12T22:37:09,579 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/37a2d379e4be4e349535c7bb1408fd58 2024-12-12T22:37:09,579 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a38d53792f87408aa5c7b65f5ed24de7 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/cb40a3e180ca4aa8b0c2359d1eb75a84 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e0b01c2914e447b4b3d0c212172f2758 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/613d3c48347a4fde989190b2ab6b7ec5 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/db773ab7db45486da2e690a3cd3697aa to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/db773ab7db45486da2e690a3cd3697aa 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/349d932a2e7b4944bb0c613d298e43a5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/349d932a2e7b4944bb0c613d298e43a5 2024-12-12T22:37:09,580 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8c8fa76114284951aa3c2718db44cfe7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8c8fa76114284951aa3c2718db44cfe7 2024-12-12T22:37:09,581 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/0b88ef9a3b6e4629a37f25b31806399c 2024-12-12T22:37:09,582 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/76f2f01b8a4f44d89406d51b8a98f79f 2024-12-12T22:37:09,582 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/4c58ca1c6b394516b50ee11d00c291fa 2024-12-12T22:37:09,582 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/8d90cadceaf645efa73152fd5a65da52 2024-12-12T22:37:09,582 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/d8d2e9c3f4474952af53ee623d148a5b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/d8d2e9c3f4474952af53ee623d148a5b 2024-12-12T22:37:09,582 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/e90da0b1590c4702a36f5b4f2b59f872 2024-12-12T22:37:09,583 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/06d875d8f537461aa607c4acda761675 2024-12-12T22:37:09,583 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6b4f905407c14070b540c82e4f675637 2024-12-12T22:37:09,584 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7d640098d3444807842db80a0d56bc9f to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7d640098d3444807842db80a0d56bc9f 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/30bc362075f04748a14ba9dfe498f860 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/462b1ca3f1f148218d683ef5b92950da 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/f5aa53c3ffa748d185dae93aa588b331 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/f5aa53c3ffa748d185dae93aa588b331 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/83c9ab4df3344a3f8504ca9cdd1b3748 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/6ac6a8087cf84b8e9e3970142f61e102 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/a1b9ca7e84d74140ad7e7da2bc42cbd5 2024-12-12T22:37:09,586 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bd9375b4ff94dcba2986f0cd82eeaa5 to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bd9375b4ff94dcba2986f0cd82eeaa5 2024-12-12T22:37:09,587 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/3bfc1b1a2ab04c3295e3442128e42b31 2024-12-12T22:37:09,587 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/c96c0ee0f56c4a1b8e748442a454808c 2024-12-12T22:37:09,587 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/b6bf1cf3a600400cba5a99c011c1b5f7 2024-12-12T22:37:09,588 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/550d2417e79c413ab3427dfaafb5c8ff to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/550d2417e79c413ab3427dfaafb5c8ff 2024-12-12T22:37:09,589 DEBUG [StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5570490a091044bf86806134abd41a29, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c5b328803e9b4a2881d951e876786e99, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/048a5ac0db134acf9f19b208b80fd2f0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/b3f08bb7e38247d4baccde3b532c8c37, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d811b6bb9404427e9abe25fb3f21d210, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/833eb47bc75c45c58f9e2a690d3dc4e5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/024c077f847f4dc9a3f696fdad6d3c6c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/f7abfa3712af4d99940dd2973efbae4d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/54878681907d40beb5f10bdde2314dfe, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31] to archive 2024-12-12T22:37:09,590 DEBUG [StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-12T22:37:09,592 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d410030255a64a93bfb75a7ea07af751 2024-12-12T22:37:09,592 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5570490a091044bf86806134abd41a29 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5570490a091044bf86806134abd41a29 2024-12-12T22:37:09,592 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0d8519bdec6544a38311285073d08589 2024-12-12T22:37:09,593 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a90b06d6438a42e7b0b211a1206de583 2024-12-12T22:37:09,593 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5b55edb621e24bcfa9856f162731306f 2024-12-12T22:37:09,593 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/eea8600a803347179e6af71b87862634 2024-12-12T22:37:09,593 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c5b328803e9b4a2881d951e876786e99 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c5b328803e9b4a2881d951e876786e99 2024-12-12T22:37:09,594 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/077e54bd055541b0877ed2a02f395684 2024-12-12T22:37:09,595 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/b3f08bb7e38247d4baccde3b532c8c37 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/b3f08bb7e38247d4baccde3b532c8c37 2024-12-12T22:37:09,595 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6693de75d9bb4e94a6017b23ecd226b3 2024-12-12T22:37:09,595 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/c7cf33604cc647278affa42b246da8fe 2024-12-12T22:37:09,595 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d811b6bb9404427e9abe25fb3f21d210 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/d811b6bb9404427e9abe25fb3f21d210 2024-12-12T22:37:09,595 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/80c0d45c0647429db88a8ffd98f9f816 2024-12-12T22:37:09,596 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/a98795cb3b75472c823c12d7b72bbaaa 2024-12-12T22:37:09,596 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/331f01f784394f94b6e66ce6c118638f 2024-12-12T22:37:09,597 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/048a5ac0db134acf9f19b208b80fd2f0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/048a5ac0db134acf9f19b208b80fd2f0 2024-12-12T22:37:09,598 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/5a3f9eab3d7d49c3ab25d309e93d9b18 2024-12-12T22:37:09,598 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/833eb47bc75c45c58f9e2a690d3dc4e5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/833eb47bc75c45c58f9e2a690d3dc4e5 2024-12-12T22:37:09,598 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/3774fd1b9267437885f024ccf3cb5c63 2024-12-12T22:37:09,599 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/73f16732ec4648149d6239001abd2bb7 2024-12-12T22:37:09,599 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/0e4033031cee4a16b871d8460014d801 2024-12-12T22:37:09,599 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/37688e61337442c4aae9e1a303c74986 2024-12-12T22:37:09,600 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/024c077f847f4dc9a3f696fdad6d3c6c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/024c077f847f4dc9a3f696fdad6d3c6c 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/288a2682570e42c29a8294b2ed00b619 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/6e4c0370ce9a45e3a83ea4350e99ca0c 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/54878681907d40beb5f10bdde2314dfe to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/54878681907d40beb5f10bdde2314dfe 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/e0c93cf343cb4f89980781baf095f022 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/28858bc77d054351b01449aa8b3bea04 2024-12-12T22:37:09,601 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/be04185007374763a89161b3fc546883 2024-12-12T22:37:09,602 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/bedfe09329b6461a97405c35592858d2 2024-12-12T22:37:09,602 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/f7abfa3712af4d99940dd2973efbae4d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/f7abfa3712af4d99940dd2973efbae4d 2024-12-12T22:37:09,602 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/03dfeae19600470abb0b17dafe73dc31 2024-12-12T22:37:09,603 DEBUG [StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f63dcab337df4b5aa1ceb0921e947339, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f4287dcd7b7b444ca82ef07b18dc9af2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/beab640e71014bc487a14c04eef22888, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ffbf199fab1142b1b3635da70e642659, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f984bb104f5049e080166af16a81350b, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c4b961d052944ac1aa58e14a3139b740, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/964df1ea72d443ae9ba30bdf6ce4fbe6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/5684b2e471494a8184a28f458c511fe9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8] to archive 2024-12-12T22:37:09,604 DEBUG [StoreCloser-TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:37:09,606 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/110576b3cc6e4d22b60ea610bda9b6ed 2024-12-12T22:37:09,606 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/45c9daef44644420ad4c3b40451b8152 2024-12-12T22:37:09,607 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f63dcab337df4b5aa1ceb0921e947339 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f63dcab337df4b5aa1ceb0921e947339 2024-12-12T22:37:09,607 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/4eb4ae51f0904b4c9cea7250cfca9b57 2024-12-12T22:37:09,607 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/2a3ef50fe04a4d4583bdbea6d5d5a403 2024-12-12T22:37:09,607 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f4287dcd7b7b444ca82ef07b18dc9af2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f4287dcd7b7b444ca82ef07b18dc9af2 2024-12-12T22:37:09,607 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/0e1ccc82ab0943bc8ceaa1073dedf48d 2024-12-12T22:37:09,608 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/d186c4f3fed34b8295f8e9924f365dd3 2024-12-12T22:37:09,608 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/11d3d554e49c4307857965bf95736a9d 2024-12-12T22:37:09,608 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/37e4c48d7e304fd28024c96f45eecfdc 2024-12-12T22:37:09,609 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/393472af807c4a329b5ccba9e91cf8e3 2024-12-12T22:37:09,609 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ffbf199fab1142b1b3635da70e642659 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ffbf199fab1142b1b3635da70e642659 2024-12-12T22:37:09,609 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/79230819d7a6469b8816a7c6d78dbf56 2024-12-12T22:37:09,609 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/beab640e71014bc487a14c04eef22888 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/beab640e71014bc487a14c04eef22888 2024-12-12T22:37:09,609 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c38969da12d14a558d239f5dee6b7a0c 2024-12-12T22:37:09,610 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/3a55766c36bf4994ad22325153afa655 2024-12-12T22:37:09,610 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f984bb104f5049e080166af16a81350b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f984bb104f5049e080166af16a81350b 2024-12-12T22:37:09,610 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/7e438ed9a5894a3aa2c223745ecef843 2024-12-12T22:37:09,610 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/ff295e00ab36442cac81440a3f08ab4c 2024-12-12T22:37:09,610 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/54480341c69449eabd49d1a227a34bc4 2024-12-12T22:37:09,611 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c4b961d052944ac1aa58e14a3139b740 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c4b961d052944ac1aa58e14a3139b740 2024-12-12T22:37:09,611 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/51e6eee0c6074b3b96c4ac0a27838b1c 2024-12-12T22:37:09,612 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/93b2ed5052674bec855c11cf78e62098 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/964df1ea72d443ae9ba30bdf6ce4fbe6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/964df1ea72d443ae9ba30bdf6ce4fbe6 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/38dde18c2d5e49eb990fab848d52e44a 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f44964eb6353417e9c1461ba3f3aab02 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/e210a14f61b54e928336f4f7a7522a8e 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/5684b2e471494a8184a28f458c511fe9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/5684b2e471494a8184a28f458c511fe9 2024-12-12T22:37:09,613 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/105514c76afb40f196e8b5285cad48f6 2024-12-12T22:37:09,614 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/c83f0d4d6bdf474eb2b8d658adfddae8 2024-12-12T22:37:09,614 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/b290939d640940379e888528e1742ffc 2024-12-12T22:37:09,617 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/recovered.edits/475.seqid, newMaxSeqId=475, maxSeqId=1 2024-12-12T22:37:09,618 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388. 
2024-12-12T22:37:09,618 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for ead55ce3707e32db5ec1e629ea38c388: 2024-12-12T22:37:09,619 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:37:09,619 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=ead55ce3707e32db5ec1e629ea38c388, regionState=CLOSED 2024-12-12T22:37:09,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-12T22:37:09,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure ead55ce3707e32db5ec1e629ea38c388, server=1aef280cf0a8,36025,1734042873576 in 744 msec 2024-12-12T22:37:09,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-12-12T22:37:09,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=ead55ce3707e32db5ec1e629ea38c388, UNASSIGN in 747 msec 2024-12-12T22:37:09,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-12T22:37:09,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 749 msec 2024-12-12T22:37:09,624 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043029624"}]},"ts":"1734043029624"} 2024-12-12T22:37:09,625 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:37:09,666 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T22:37:09,667 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.0630 sec 2024-12-12T22:37:09,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-12T22:37:09,710 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-12T22:37:09,710 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:37:09,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,711 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T22:37:09,712 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,713 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:37:09,714 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/recovered.edits] 2024-12-12T22:37:09,717 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/089554f353844ae286ecccc60aa2f2ca to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/089554f353844ae286ecccc60aa2f2ca 2024-12-12T22:37:09,717 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/899fbcc321fc484c802adad9549437a5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/899fbcc321fc484c802adad9549437a5 2024-12-12T22:37:09,717 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7649c4f9c1894b28802b1012854b0c08 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/A/7649c4f9c1894b28802b1012854b0c08 2024-12-12T22:37:09,719 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/065c8ec2e2b34016b8c99c316403ed14 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/065c8ec2e2b34016b8c99c316403ed14 2024-12-12T22:37:09,719 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/8c7f4c7ad2d64784b751f6313dd5ba86 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/8c7f4c7ad2d64784b751f6313dd5ba86 
2024-12-12T22:37:09,719 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/fa884493759c431289509ed674231b89 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/B/fa884493759c431289509ed674231b89 2024-12-12T22:37:09,722 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/34b19a7f654942fc85de4d31b363c603 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/34b19a7f654942fc85de4d31b363c603 2024-12-12T22:37:09,722 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/9b771bdcb029478caec46b41e166c92b to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/9b771bdcb029478caec46b41e166c92b 2024-12-12T22:37:09,722 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f43f986027a847ddb4d2cd7f0032310e to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/C/f43f986027a847ddb4d2cd7f0032310e 2024-12-12T22:37:09,725 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/recovered.edits/475.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388/recovered.edits/475.seqid 2024-12-12T22:37:09,725 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/ead55ce3707e32db5ec1e629ea38c388 2024-12-12T22:37:09,725 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:37:09,727 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,729 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:37:09,730 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-12T22:37:09,731 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,731 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-12T22:37:09,731 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734043029731"}]},"ts":"9223372036854775807"} 2024-12-12T22:37:09,733 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:37:09,733 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => ead55ce3707e32db5ec1e629ea38c388, NAME => 'TestAcidGuarantees,,1734042998231.ead55ce3707e32db5ec1e629ea38c388.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:37:09,733 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T22:37:09,733 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734043029733"}]},"ts":"9223372036854775807"} 2024-12-12T22:37:09,734 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:37:09,741 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 32 msec 2024-12-12T22:37:09,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-12T22:37:09,813 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-12T22:37:09,823 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=243 (was 246), OpenFileDescriptor=450 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1496 (was 1631), ProcessCount=9 (was 11), AvailableMemoryMB=3871 (was 4068) 2024-12-12T22:37:09,832 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=243, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=1425, ProcessCount=9, AvailableMemoryMB=3870 2024-12-12T22:37:09,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
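[Editor's note] The delete sequence above finishes with "Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed". A minimal client-side sketch of issuing the same disable/delete through the Admin API; the ZooKeeper settings come from this run's logs, everything else (class name, error handling) is an illustrative assumption, not the test's own code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");               // from the ReadOnlyZKClient log lines
        conf.set("hbase.zookeeper.property.clientPort", "50645");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestAcidGuarantees");
          if (admin.tableExists(tn)) {
            if (admin.isTableEnabled(tn)) {
              admin.disableTable(tn);   // a table must be disabled before DeleteTableProcedure can run
            }
            admin.deleteTable(tn);      // blocks until the master-side procedure (like pid=126 above) completes
          }
        }
      }
    }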
2024-12-12T22:37:09,833 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:37:09,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:09,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-12T22:37:09,835 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:09,835 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-12-12T22:37:09,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-12T22:37:09,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:09,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742334_1510 (size=963) 2024-12-12T22:37:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:10,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:10,242 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc 2024-12-12T22:37:10,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742335_1511 (size=53) 2024-12-12T22:37:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 487bd01b47fe4fd77cb2b4619f92faba, disabling compactions & flushes 2024-12-12T22:37:10,662 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. after waiting 0 ms 2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:10,662 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
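[Editor's note] The master log above records the create request for 'TestAcidGuarantees' with the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' and three identical families A, B and C (VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB block size). A hedged sketch of building that descriptor with the HBase 2.x client API; the test itself drives this through its own helpers, so the class and method names below are illustrative.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTable {
      static ColumnFamilyDescriptor family(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
            .build();
      }

      public static void createTable(Admin admin) throws Exception {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata seen in the log: ADAPTIVE in-memory compaction for the CompactingMemStore
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE")
            .setColumnFamily(family("A"))
            .setColumnFamily(family("B"))
            .setColumnFamily(family("C"))
            .build();
        admin.createTable(td);   // stores a CreateTableProcedure on the master, as with pid=127 above
      }
    }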
2024-12-12T22:37:10,662 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:10,663 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-12T22:37:10,663 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734043030663"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734043030663"}]},"ts":"1734043030663"} 2024-12-12T22:37:10,664 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-12T22:37:10,664 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-12T22:37:10,664 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043030664"}]},"ts":"1734043030664"} 2024-12-12T22:37:10,665 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-12T22:37:10,716 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, ASSIGN}] 2024-12-12T22:37:10,717 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, ASSIGN 2024-12-12T22:37:10,718 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, ASSIGN; state=OFFLINE, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=false 2024-12-12T22:37:10,868 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:10,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:37:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:11,021 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:11,023 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:11,023 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:37:11,023 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,023 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:37:11,023 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,023 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,025 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,026 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:11,026 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName A 2024-12-12T22:37:11,026 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:11,027 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:11,027 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,028 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:11,028 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName B 2024-12-12T22:37:11,028 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:11,028 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:11,028 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,029 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:11,029 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName C 2024-12-12T22:37:11,029 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:11,030 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:11,030 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:11,030 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,030 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,031 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:37:11,032 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:11,034 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-12T22:37:11,034 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened 487bd01b47fe4fd77cb2b4619f92faba; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74630015, jitterRate=0.1120738834142685}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:37:11,035 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:11,035 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., pid=129, masterSystemTime=1734043031020 2024-12-12T22:37:11,036 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:11,036 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
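[Editor's note] With the region opened ("Opened 487bd01b47fe4fd77cb2b4619f92faba; next sequenceid=2" above), the test's writer threads put the same row into all three families so that atomicity across A, B and C can later be checked by the scanners. A simplified, hypothetical sketch of such a multi-family put; the row key, qualifier and value are made up, only the table and family names come from the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiFamilyPut {
      public static void writeRow(Connection conn, byte[] row, byte[] value) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(row);
          // a single Put spanning several families is applied atomically within the row
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col"), value);  // "col" is an illustrative qualifier
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col"), value);
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col"), value);
          table.put(put);
        }
      }
    }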
2024-12-12T22:37:11,037 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=OPEN, openSeqNum=2, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:11,038 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-12T22:37:11,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 in 168 msec 2024-12-12T22:37:11,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-12-12T22:37:11,039 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, ASSIGN in 322 msec 2024-12-12T22:37:11,040 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-12T22:37:11,040 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043031040"}]},"ts":"1734043031040"} 2024-12-12T22:37:11,041 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-12T22:37:11,109 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-12T22:37:11,110 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2760 sec 2024-12-12T22:37:11,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-12-12T22:37:11,940 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-12-12T22:37:11,941 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e047c09 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11030ef5 2024-12-12T22:37:12,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1584f18a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:12,044 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:12,045 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:12,046 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-12T22:37:12,047 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57368, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-12T22:37:12,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-12T22:37:12,049 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-12T22:37:12,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:12,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742336_1512 (size=999) 2024-12-12T22:37:12,468 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-12T22:37:12,468 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-12T22:37:12,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:37:12,472 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, REOPEN/MOVE}] 2024-12-12T22:37:12,473 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, REOPEN/MOVE 2024-12-12T22:37:12,473 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:12,474 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:37:12,474 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:37:12,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:12,627 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,627 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:37:12,627 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 487bd01b47fe4fd77cb2b4619f92faba, disabling compactions & flushes 2024-12-12T22:37:12,627 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:12,627 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:12,628 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. after waiting 0 ms 2024-12-12T22:37:12,628 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
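[Editor's note] The modify request logged above switches family A to IS_MOB => 'true' with MOB_THRESHOLD => '4' while leaving B and C unchanged; the master then writes a new tableinfo file and reopens the region via ReopenTableRegionsProcedure. A hedged sketch of the equivalent client-side change (not the test's own code; the class and method names around the Admin call are illustrative).

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void enableMob(Admin admin) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
        ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
            .setMobEnabled(true)   // IS_MOB => 'true'
            .setMobThreshold(4L)   // MOB_THRESHOLD => '4': cells larger than 4 bytes go to MOB files
            .build();
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(mobA)
            .build();
        admin.modifyTable(modified); // triggers ModifyTableProcedure + region reopen, as in the log above
      }
    }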
2024-12-12T22:37:12,634 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-12T22:37:12,635 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:12,635 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:12,635 WARN [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegionServer(3786): Not adding moved region record: 487bd01b47fe4fd77cb2b4619f92faba to self. 2024-12-12T22:37:12,637 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,637 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=CLOSED 2024-12-12T22:37:12,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-12T22:37:12,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 in 164 msec 2024-12-12T22:37:12,640 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, REOPEN/MOVE; state=CLOSED, location=1aef280cf0a8,36025,1734042873576; forceNewPlan=false, retain=true 2024-12-12T22:37:12,790 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=OPENING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:12,791 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE; OpenRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:37:12,943 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:12,945 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:12,945 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7285): Opening region: {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} 2024-12-12T22:37:12,945 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-12T22:37:12,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7327): checking encryption for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,946 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7330): checking classloading for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,947 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,948 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:12,948 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName A 2024-12-12T22:37:12,949 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:12,949 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:12,950 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,950 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:12,951 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName B 2024-12-12T22:37:12,951 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:12,951 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:12,951 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,952 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-12T22:37:12,952 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 487bd01b47fe4fd77cb2b4619f92faba columnFamilyName C 2024-12-12T22:37:12,952 DEBUG [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:12,952 INFO [StoreOpener-487bd01b47fe4fd77cb2b4619f92faba-1 {}] regionserver.HStore(327): Store=487bd01b47fe4fd77cb2b4619f92faba/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-12T22:37:12,953 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:12,958 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,959 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,960 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-12T22:37:12,962 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1085): writing seq id for 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:12,962 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1102): Opened 487bd01b47fe4fd77cb2b4619f92faba; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69589633, jitterRate=0.036966338753700256}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-12T22:37:12,963 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1001): Region open journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:12,964 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., pid=134, masterSystemTime=1734043032943 2024-12-12T22:37:12,965 DEBUG [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:12,965 INFO [RS_OPEN_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
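[Editor's note] The region is now open with the MOB-enabled descriptor; a few entries below, the ModifyTableProcedure completes, the test opens a set of ZooKeeper-backed client connections (the ReadOnlyZKClient "Connect 0x..." entries), and the master is asked to flush the table (FlushTableProcedure pid=135). A minimal sketch of how a client would connect to this mini-cluster's ZooKeeper and request the same flush; only the quorum address and table name are taken from the log, the rest is an illustrative assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "50645");  // ZK port from the ReadOnlyZKClient entries
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous flush of every region of the table; the master runs a FlushTableProcedure for it.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }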
2024-12-12T22:37:12,966 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=OPEN, openSeqNum=5, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:12,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=132 2024-12-12T22:37:12,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=132, state=SUCCESS; OpenRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 in 176 msec 2024-12-12T22:37:12,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=131 2024-12-12T22:37:12,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=131, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, REOPEN/MOVE in 496 msec 2024-12-12T22:37:12,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-12T22:37:12,971 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 500 msec 2024-12-12T22:37:12,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 922 msec 2024-12-12T22:37:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-12T22:37:12,975 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x60d631a3 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@69abefea 2024-12-12T22:37:13,017 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b914bf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,017 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-12-12T22:37:13,036 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,037 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-12-12T22:37:13,047 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,048 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x11c440f7 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-12-12T22:37:13,080 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,081 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-12-12T22:37:13,096 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,097 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5bb75907 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@68c2838a 2024-12-12T22:37:13,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@458a85fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,110 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c1d3a95 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@50bf224f 2024-12-12T22:37:13,130 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@410bf0c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,130 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x503a7d2e to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79be903c 2024-12-12T22:37:13,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67adb273, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,147 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x404bb685 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4d79f1c0 2024-12-12T22:37:13,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@474dec36, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,159 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42aacb30 to 127.0.0.1:50645 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@40dfd554 2024-12-12T22:37:13,171 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68dbad25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-12T22:37:13,173 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:13,174 DEBUG [hconnection-0x5ea21d33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=135, table=TestAcidGuarantees 2024-12-12T22:37:13,175 DEBUG [hconnection-0x71494ee3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,175 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=135, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=135, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:13,175 DEBUG [hconnection-0x1c2a9547-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:13,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48718, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,176 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,176 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=135, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=135, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:13,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=136, ppid=135, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:13,176 DEBUG [hconnection-0x63f3ae5a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,177 DEBUG [hconnection-0x5801f8f8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,177 DEBUG [hconnection-0x543f194-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,179 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48742, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,180 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48740, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,180 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,181 DEBUG [hconnection-0x3f436697-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,182 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48764, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,182 DEBUG [hconnection-0x36dc7adf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,183 DEBUG [hconnection-0x78a52a9e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,183 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48780, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,184 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:13,186 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-12T22:37:13,186 DEBUG [hconnection-0x64ab6fcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:13,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,188 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-12T22:37:13,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043093205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043093206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043093207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043093210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043093212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,215 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121258cd658eb6304cfb90fa7ef88039af87_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043033186/Put/seqid=0 2024-12-12T22:37:13,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742337_1513 (size=12154) 2024-12-12T22:37:13,243 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:13,264 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121258cd658eb6304cfb90fa7ef88039af87_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258cd658eb6304cfb90fa7ef88039af87_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:13,268 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/bf103fea04c44ab1a13529101d38f2e5, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:13,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/bf103fea04c44ab1a13529101d38f2e5 is 175, key is test_row_0/A:col10/1734043033186/Put/seqid=0 2024-12-12T22:37:13,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:13,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742338_1514 (size=30955) 2024-12-12T22:37:13,306 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/bf103fea04c44ab1a13529101d38f2e5 2024-12-12T22:37:13,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043093312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043093313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043093315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043093316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,320 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043093319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=136 2024-12-12T22:37:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:13,330 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=136}] handler.RSProcedureHandler(58): pid=136 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
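For context on the failure above: the master has dispatched FlushRegionProcedure pid=136 for TestAcidGuarantees, but the region server's MemStoreFlusher is already flushing region 487bd01b47fe4fd77cb2b4619f92faba, so FlushRegionCallable reports "Unable to complete flush" and the master re-dispatches the sub-procedure (it appears again at 22:37:13,487 and 22:37:13,640). The flush itself was requested by a client ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and the repeated "Checking to see if procedure is done pid=135" lines are the master answering that client's polling for completion. The following is a minimal, hypothetical sketch of issuing such a flush through the standard HBase Admin API; the ZooKeeper quorum and port mirror the ReadOnlyZKClient lines above, but this code is not part of the test run being logged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The mini-cluster in this log runs ZooKeeper on 127.0.0.1:50645
    // (see the ReadOnlyZKClient "Connect ... to 127.0.0.1:50645" lines).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "50645");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a FlushTableProcedure on the master, which in turn dispatches
      // FlushRegionProcedure sub-procedures (pid=136 above) to the region servers.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}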
2024-12-12T22:37:13,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=136 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:13,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=136 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:13,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0ef4a98a167e4d1ab61c7bb592e94bf8 is 50, key is test_row_0/B:col10/1734043033186/Put/seqid=0 2024-12-12T22:37:13,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742339_1515 (size=12001) 2024-12-12T22:37:13,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0ef4a98a167e4d1ab61c7bb592e94bf8 2024-12-12T22:37:13,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:13,486 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/1e818518c5064689b8bbcc3fbff62068 is 50, key is test_row_0/C:col10/1734043033186/Put/seqid=0 2024-12-12T22:37:13,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,487 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=136 2024-12-12T22:37:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:13,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=136}] handler.RSProcedureHandler(58): pid=136 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:13,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=136 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:13,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=136 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:13,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043093515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043093518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043093519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742340_1516 (size=12001) 2024-12-12T22:37:13,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043093525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/1e818518c5064689b8bbcc3fbff62068 2024-12-12T22:37:13,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043093535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/bf103fea04c44ab1a13529101d38f2e5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5 2024-12-12T22:37:13,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5, entries=150, sequenceid=18, filesize=30.2 K 2024-12-12T22:37:13,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0ef4a98a167e4d1ab61c7bb592e94bf8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8 2024-12-12T22:37:13,611 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8, entries=150, sequenceid=18, filesize=11.7 K 2024-12-12T22:37:13,612 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/1e818518c5064689b8bbcc3fbff62068 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068 2024-12-12T22:37:13,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068, entries=150, sequenceid=18, filesize=11.7 K 2024-12-12T22:37:13,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 
KB/130530 for 487bd01b47fe4fd77cb2b4619f92faba in 432ms, sequenceid=18, compaction requested=false 2024-12-12T22:37:13,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:13,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=136 2024-12-12T22:37:13,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:13,640 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:13,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:13,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c86f478fea3c4b2782555da3997b555a_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043033207/Put/seqid=0 2024-12-12T22:37:13,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742341_1517 (size=12154) 2024-12-12T22:37:13,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:13,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
as already flushing 2024-12-12T22:37:13,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:13,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043093835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043093838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043093839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043093840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043093850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043093944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043093944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:13,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:13,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043093944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:14,119 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212c86f478fea3c4b2782555da3997b555a_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c86f478fea3c4b2782555da3997b555a_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:14,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31999745c06d47929a8021dee1c4875a, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:14,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31999745c06d47929a8021dee1c4875a is 175, key is test_row_0/A:col10/1734043033207/Put/seqid=0 2024-12-12T22:37:14,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742342_1518 (size=30955) 2024-12-12T22:37:14,132 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31999745c06d47929a8021dee1c4875a 2024-12-12T22:37:14,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/bfd461df81054eb292088ea0bb6c40e2 is 50, key is test_row_0/B:col10/1734043033207/Put/seqid=0 2024-12-12T22:37:14,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043094147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043094147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043094147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742343_1519 (size=12001) 2024-12-12T22:37:14,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:14,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043094344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043094359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043094451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043094451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:14,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043094451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:14,563 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/bfd461df81054eb292088ea0bb6c40e2 2024-12-12T22:37:14,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e88fdfe01bd34132bf8f2b67945e0cd8 is 50, key is test_row_0/C:col10/1734043033207/Put/seqid=0 2024-12-12T22:37:14,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742344_1520 (size=12001) 2024-12-12T22:37:14,654 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e88fdfe01bd34132bf8f2b67945e0cd8 2024-12-12T22:37:14,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31999745c06d47929a8021dee1c4875a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a 2024-12-12T22:37:14,662 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a, entries=150, sequenceid=40, filesize=30.2 K 2024-12-12T22:37:14,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/bfd461df81054eb292088ea0bb6c40e2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2 2024-12-12T22:37:14,669 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T22:37:14,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e88fdfe01bd34132bf8f2b67945e0cd8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8 2024-12-12T22:37:14,676 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8, entries=150, sequenceid=40, filesize=11.7 K 2024-12-12T22:37:14,677 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 487bd01b47fe4fd77cb2b4619f92faba in 1037ms, sequenceid=40, compaction requested=false 2024-12-12T22:37:14,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:14,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:14,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=136}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=136 2024-12-12T22:37:14,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=136 2024-12-12T22:37:14,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=136, resume processing ppid=135 2024-12-12T22:37:14,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, ppid=135, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5050 sec 2024-12-12T22:37:14,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=135, table=TestAcidGuarantees in 1.5120 sec 2024-12-12T22:37:14,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T22:37:14,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:14,971 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:14,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f7b026bf2ce14c61a7d661c5bd96b821_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742345_1521 (size=14594) 2024-12-12T22:37:15,019 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:15,026 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f7b026bf2ce14c61a7d661c5bd96b821_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f7b026bf2ce14c61a7d661c5bd96b821_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:15,029 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31dea063c3c9465c8c1132b4a1feff84, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31dea063c3c9465c8c1132b4a1feff84 is 175, key is test_row_0/A:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742346_1522 (size=39549) 2024-12-12T22:37:15,062 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=58, memsize=33.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31dea063c3c9465c8c1132b4a1feff84 2024-12-12T22:37:15,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/07eff79b4fe54fd28a0fe8d85f4bfe59 is 50, key is test_row_0/B:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742347_1523 (size=12001) 2024-12-12T22:37:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,154 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-12T22:37:15,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-12T22:37:15,285 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 135 completed 2024-12-12T22:37:15,286 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:15,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=137, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=137, table=TestAcidGuarantees 2024-12-12T22:37:15,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:15,289 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=137, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=137, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:15,290 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=137, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=137, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:15,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043095345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043095369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:15,443 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=138 2024-12-12T22:37:15,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:15,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=138}] handler.RSProcedureHandler(58): pid=138 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:15,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=138 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:15,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=138 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:15,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/07eff79b4fe54fd28a0fe8d85f4bfe59 2024-12-12T22:37:15,504 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/25ac1095d5de4f3daf8b36612223784d is 50, key is test_row_0/C:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742348_1524 (size=12001) 2024-12-12T22:37:15,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=58 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/25ac1095d5de4f3daf8b36612223784d 2024-12-12T22:37:15,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/31dea063c3c9465c8c1132b4a1feff84 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84 2024-12-12T22:37:15,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84, entries=200, sequenceid=58, filesize=38.6 K 2024-12-12T22:37:15,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/07eff79b4fe54fd28a0fe8d85f4bfe59 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59 2024-12-12T22:37:15,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59, entries=150, sequenceid=58, filesize=11.7 K 2024-12-12T22:37:15,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/25ac1095d5de4f3daf8b36612223784d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d 
2024-12-12T22:37:15,542 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d, entries=150, sequenceid=58, filesize=11.7 K 2024-12-12T22:37:15,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 487bd01b47fe4fd77cb2b4619f92faba in 574ms, sequenceid=58, compaction requested=true 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:15,543 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:15,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:15,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:37:15,544 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:15,545 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:15,545 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:15,545 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:15,545 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:15,545 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:15,545 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,545 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=99.1 K 2024-12-12T22:37:15,545 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=35.2 K 2024-12-12T22:37:15,545 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,545 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84] 2024-12-12T22:37:15,546 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bf103fea04c44ab1a13529101d38f2e5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734043033185 2024-12-12T22:37:15,546 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ef4a98a167e4d1ab61c7bb592e94bf8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734043033185 2024-12-12T22:37:15,547 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfd461df81054eb292088ea0bb6c40e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734043033199 2024-12-12T22:37:15,547 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 31999745c06d47929a8021dee1c4875a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734043033199 2024-12-12T22:37:15,548 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07eff79b4fe54fd28a0fe8d85f4bfe59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1734043033836 2024-12-12T22:37:15,548 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 31dea063c3c9465c8c1132b4a1feff84, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1734043033834 2024-12-12T22:37:15,571 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#446 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:15,572 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/62da6a9f8efa4093939255da88425a55 is 50, key is test_row_0/B:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,573 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,575 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121273185c191264469cb152759bea1307e8_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742349_1525 (size=12104) 2024-12-12T22:37:15,577 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121273185c191264469cb152759bea1307e8_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,578 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121273185c191264469cb152759bea1307e8_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,582 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/62da6a9f8efa4093939255da88425a55 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/62da6a9f8efa4093939255da88425a55 2024-12-12T22:37:15,587 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 62da6a9f8efa4093939255da88425a55(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:15,587 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:15,587 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043035543; duration=0sec 2024-12-12T22:37:15,587 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:15,587 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:15,588 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:15,588 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:15,588 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:15,589 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,589 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=35.2 K 2024-12-12T22:37:15,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:15,589 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e818518c5064689b8bbcc3fbff62068, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734043033185 2024-12-12T22:37:15,590 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e88fdfe01bd34132bf8f2b67945e0cd8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734043033199 2024-12-12T22:37:15,590 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25ac1095d5de4f3daf8b36612223784d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=58, earliestPutTs=1734043033836 2024-12-12T22:37:15,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742350_1526 (size=4469) 2024-12-12T22:37:15,599 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,599 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=138 2024-12-12T22:37:15,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:15,599 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T22:37:15,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:15,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:15,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:15,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:15,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:15,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:15,604 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#448 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:15,604 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/176c6be7ea6147538c47e5d424a68310 is 50, key is test_row_0/C:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:15,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129c65e9beb6ea45bc912a2bb375cc5cd9_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043034997/Put/seqid=0 2024-12-12T22:37:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:15,622 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:15,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742351_1527 (size=12104) 2024-12-12T22:37:15,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742352_1528 (size=12154) 2024-12-12T22:37:15,628 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/176c6be7ea6147538c47e5d424a68310 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/176c6be7ea6147538c47e5d424a68310 2024-12-12T22:37:15,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:15,637 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 176c6be7ea6147538c47e5d424a68310(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:15,637 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:15,637 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043035543; duration=0sec 2024-12-12T22:37:15,638 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:15,638 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:15,640 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129c65e9beb6ea45bc912a2bb375cc5cd9_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129c65e9beb6ea45bc912a2bb375cc5cd9_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:15,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/267007e07c68428380d597f91506b6a3, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:15,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/267007e07c68428380d597f91506b6a3 is 175, key is test_row_0/A:col10/1734043034997/Put/seqid=0 2024-12-12T22:37:15,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742353_1529 (size=30955) 2024-12-12T22:37:15,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,664 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095662, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:15,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043095979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043095980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:15,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:15,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043095985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,000 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#447 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:16,001 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/e2ee0016bc9041d3aff5137523138ea5 is 175, key is test_row_0/A:col10/1734043034959/Put/seqid=0 2024-12-12T22:37:16,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742354_1530 (size=31058) 2024-12-12T22:37:16,047 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=77, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/267007e07c68428380d597f91506b6a3 2024-12-12T22:37:16,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/5b8e8895d75b406da0cc6f4d09fce601 is 50, key is test_row_0/B:col10/1734043034997/Put/seqid=0 2024-12-12T22:37:16,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742355_1531 (size=12001) 2024-12-12T22:37:16,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043096283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043096283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,290 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043096288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:16,436 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/e2ee0016bc9041d3aff5137523138ea5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5 2024-12-12T22:37:16,444 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into e2ee0016bc9041d3aff5137523138ea5(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:16,445 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:16,445 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043035543; duration=0sec 2024-12-12T22:37:16,445 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:16,445 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:16,516 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/5b8e8895d75b406da0cc6f4d09fce601 2024-12-12T22:37:16,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9cbdca274dcd49299add277957b3c2f4 is 50, key is test_row_0/C:col10/1734043034997/Put/seqid=0 2024-12-12T22:37:16,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742356_1532 (size=12001) 2024-12-12T22:37:16,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043096788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,795 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043096794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043096794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:16,959 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9cbdca274dcd49299add277957b3c2f4 2024-12-12T22:37:16,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/267007e07c68428380d597f91506b6a3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3 2024-12-12T22:37:16,969 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3, entries=150, sequenceid=77, filesize=30.2 K 2024-12-12T22:37:16,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/5b8e8895d75b406da0cc6f4d09fce601 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601 2024-12-12T22:37:17,008 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T22:37:17,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9cbdca274dcd49299add277957b3c2f4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4 2024-12-12T22:37:17,012 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4, entries=150, sequenceid=77, filesize=11.7 K 2024-12-12T22:37:17,013 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 487bd01b47fe4fd77cb2b4619f92faba in 1414ms, sequenceid=77, compaction requested=false 2024-12-12T22:37:17,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:17,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:17,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-12T22:37:17,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=138 2024-12-12T22:37:17,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=137 2024-12-12T22:37:17,016 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=137, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7240 sec 2024-12-12T22:37:17,017 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=137, table=TestAcidGuarantees in 1.7300 sec 2024-12-12T22:37:17,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:17,367 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T22:37:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:17,367 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:17,368 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:17,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121244a2ee77ca42470199d9731c275f7fef_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:17,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742357_1533 (size=12154) 2024-12-12T22:37:17,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=137 2024-12-12T22:37:17,392 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 137 completed 2024-12-12T22:37:17,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043097391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,395 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:17,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043097393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=139, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=139, table=TestAcidGuarantees 2024-12-12T22:37:17,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:17,403 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=139, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=139, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:17,407 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=139, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=139, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:17,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:17,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043097495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043097497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:17,560 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,560 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:17,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,701 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043097700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:17,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043097704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,713 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:17,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:17,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:17,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,787 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:17,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043097794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,801 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121244a2ee77ca42470199d9731c275f7fef_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121244a2ee77ca42470199d9731c275f7fef_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:17,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043097800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:17,803 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a03f57831b554d259ad8a19772536c09, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043097802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a03f57831b554d259ad8a19772536c09 is 175, key is test_row_0/A:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:17,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742358_1534 (size=30955) 2024-12-12T22:37:17,837 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=99, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a03f57831b554d259ad8a19772536c09 2024-12-12T22:37:17,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/4a6ff29399de48c78ffe5620002c2049 is 50, key is test_row_0/B:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:17,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:17,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:17,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
as already flushing 2024-12-12T22:37:17,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742359_1535 (size=12001) 2024-12-12T22:37:17,915 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/4a6ff29399de48c78ffe5620002c2049 2024-12-12T22:37:17,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b43e0d4c69f045a1956dc393b63de056 is 50, key is test_row_0/C:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:17,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742360_1536 (size=12001) 2024-12-12T22:37:17,957 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b43e0d4c69f045a1956dc393b63de056 2024-12-12T22:37:17,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a03f57831b554d259ad8a19772536c09 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09 2024-12-12T22:37:17,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09, entries=150, sequenceid=99, filesize=30.2 K 2024-12-12T22:37:17,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/4a6ff29399de48c78ffe5620002c2049 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049 2024-12-12T22:37:17,976 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049, entries=150, sequenceid=99, filesize=11.7 K 2024-12-12T22:37:17,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b43e0d4c69f045a1956dc393b63de056 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056 2024-12-12T22:37:17,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056, entries=150, sequenceid=99, filesize=11.7 K 2024-12-12T22:37:17,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 487bd01b47fe4fd77cb2b4619f92faba in 625ms, sequenceid=99, compaction requested=true 2024-12-12T22:37:17,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:17,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:17,992 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:17,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:17,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:17,993 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 
16 blocking 2024-12-12T22:37:17,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:17,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:17,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:17,994 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:17,994 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=90.8 K 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:17,994 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,994 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09] 2024-12-12T22:37:17,994 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/62da6a9f8efa4093939255da88425a55, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=35.3 K 2024-12-12T22:37:17,994 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e2ee0016bc9041d3aff5137523138ea5, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1734043033836 2024-12-12T22:37:17,995 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 267007e07c68428380d597f91506b6a3, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734043034997 2024-12-12T22:37:17,995 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62da6a9f8efa4093939255da88425a55, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1734043033836 2024-12-12T22:37:17,995 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b8e8895d75b406da0cc6f4d09fce601, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734043034997 2024-12-12T22:37:17,995 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a03f57831b554d259ad8a19772536c09, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:17,995 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a6ff29399de48c78ffe5620002c2049, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:18,002 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:18,015 INFO 
[RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#456 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:18,016 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8eeef63b1dc64f35bf8a6d04c3e5e667 is 50, key is test_row_0/B:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:18,017 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412120b9a6d85a7864089a915696eb94a1a1e_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,018 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412120b9a6d85a7864089a915696eb94a1a1e_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,018 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412120b9a6d85a7864089a915696eb94a1a1e_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,020 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T22:37:18,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:18,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:18,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:18,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:18,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742361_1537 (size=12207) 2024-12-12T22:37:18,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:18,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,038 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:18,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742362_1538 (size=4469) 2024-12-12T22:37:18,053 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b59f877f930542eb888bd2db20aa7c26_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043037381/Put/seqid=0 2024-12-12T22:37:18,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742363_1539 (size=12154) 2024-12-12T22:37:18,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043098105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043098107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,195 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:18,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:18,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:18,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043098208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043098208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,347 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:18,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:18,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,348 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:18,348 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043098414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043098423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,444 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#455 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:18,444 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8eeef63b1dc64f35bf8a6d04c3e5e667 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8eeef63b1dc64f35bf8a6d04c3e5e667 2024-12-12T22:37:18,444 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/6289983006be43d2af3e4b7f9b4a9259 is 175, key is test_row_0/A:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:18,450 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 8eeef63b1dc64f35bf8a6d04c3e5e667(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:18,450 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:18,450 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043037993; duration=0sec 2024-12-12T22:37:18,450 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:18,450 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:18,450 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:18,452 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:18,452 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:18,452 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,452 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/176c6be7ea6147538c47e5d424a68310, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=35.3 K 2024-12-12T22:37:18,452 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 176c6be7ea6147538c47e5d424a68310, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=58, earliestPutTs=1734043033836 2024-12-12T22:37:18,453 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cbdca274dcd49299add277957b3c2f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734043034997 2024-12-12T22:37:18,462 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b43e0d4c69f045a1956dc393b63de056, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:18,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43795 is added to blk_1073742364_1540 (size=31161) 2024-12-12T22:37:18,471 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:18,471 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/6568955b83c041d3b42ba0d86b144781 is 50, key is test_row_0/C:col10/1734043037366/Put/seqid=0 2024-12-12T22:37:18,472 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:18,476 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212b59f877f930542eb888bd2db20aa7c26_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b59f877f930542eb888bd2db20aa7c26_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:18,477 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/67953b459449417b86c44c4feb73b2f6, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/67953b459449417b86c44c4feb73b2f6 is 175, key is test_row_0/A:col10/1734043037381/Put/seqid=0 2024-12-12T22:37:18,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742365_1541 (size=12207) 2024-12-12T22:37:18,498 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/6568955b83c041d3b42ba0d86b144781 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/6568955b83c041d3b42ba0d86b144781 2024-12-12T22:37:18,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:18,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:18,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:18,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] handler.RSProcedureHandler(58): pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=140 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:18,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=140 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:18,505 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 6568955b83c041d3b42ba0d86b144781(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:18,505 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:18,505 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043037993; duration=0sec 2024-12-12T22:37:18,505 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:18,505 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:18,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:18,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742366_1542 (size=30955) 2024-12-12T22:37:18,509 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/67953b459449417b86c44c4feb73b2f6 2024-12-12T22:37:18,523 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/fb0f842b824a492ab08933acb5cd66a9 is 50, key is test_row_0/B:col10/1734043037381/Put/seqid=0 2024-12-12T22:37:18,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742367_1543 (size=12001) 2024-12-12T22:37:18,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/fb0f842b824a492ab08933acb5cd66a9 2024-12-12T22:37:18,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9acd8b7adb6647339617b85bea4a30cb is 50, key is test_row_0/C:col10/1734043037381/Put/seqid=0 2024-12-12T22:37:18,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742368_1544 (size=12001) 2024-12-12T22:37:18,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9acd8b7adb6647339617b85bea4a30cb 2024-12-12T22:37:18,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/67953b459449417b86c44c4feb73b2f6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6 2024-12-12T22:37:18,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6, entries=150, sequenceid=117, filesize=30.2 K 2024-12-12T22:37:18,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/fb0f842b824a492ab08933acb5cd66a9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9 2024-12-12T22:37:18,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T22:37:18,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/9acd8b7adb6647339617b85bea4a30cb as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb 2024-12-12T22:37:18,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb, entries=150, sequenceid=117, filesize=11.7 K 2024-12-12T22:37:18,625 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 487bd01b47fe4fd77cb2b4619f92faba in 604ms, sequenceid=117, compaction requested=false 2024-12-12T22:37:18,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:18,652 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=140 2024-12-12T22:37:18,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:18,653 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:18,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:18,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121279aa951dc9124f42981f0f4ebef20b59_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043038106/Put/seqid=0 2024-12-12T22:37:18,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742369_1545 (size=12304) 2024-12-12T22:37:18,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:18,716 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121279aa951dc9124f42981f0f4ebef20b59_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279aa951dc9124f42981f0f4ebef20b59_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:18,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/f583be3b142041d28c829cc41fb3d304, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:18,718 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/f583be3b142041d28c829cc41fb3d304 is 175, key is test_row_0/A:col10/1734043038106/Put/seqid=0 2024-12-12T22:37:18,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:18,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:18,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742370_1546 (size=31105) 2024-12-12T22:37:18,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043098804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043098807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,868 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/6289983006be43d2af3e4b7f9b4a9259 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259 2024-12-12T22:37:18,871 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into 6289983006be43d2af3e4b7f9b4a9259(size=30.4 K), total size for store is 60.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:18,871 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:18,871 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043037992; duration=0sec 2024-12-12T22:37:18,872 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:18,872 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:18,909 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043098908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:18,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:18,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043098916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043099113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043099119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,144 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/f583be3b142041d28c829cc41fb3d304 2024-12-12T22:37:19,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/068dac28fed7497bb9404303cdf02265 is 50, key is test_row_0/B:col10/1734043038106/Put/seqid=0 2024-12-12T22:37:19,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742371_1547 (size=12151) 2024-12-12T22:37:19,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043099421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,430 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043099428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:19,651 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/068dac28fed7497bb9404303cdf02265 2024-12-12T22:37:19,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/a5d9846006794b40953a46a370e7dd57 is 50, key is test_row_0/C:col10/1734043038106/Put/seqid=0 2024-12-12T22:37:19,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742372_1548 (size=12151) 2024-12-12T22:37:19,690 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/a5d9846006794b40953a46a370e7dd57 2024-12-12T22:37:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/f583be3b142041d28c829cc41fb3d304 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304 2024-12-12T22:37:19,712 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304, entries=150, sequenceid=137, filesize=30.4 K 2024-12-12T22:37:19,713 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/068dac28fed7497bb9404303cdf02265 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265 2024-12-12T22:37:19,719 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T22:37:19,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/a5d9846006794b40953a46a370e7dd57 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57 2024-12-12T22:37:19,726 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57, entries=150, sequenceid=137, filesize=11.9 K 2024-12-12T22:37:19,739 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 487bd01b47fe4fd77cb2b4619f92faba in 1086ms, sequenceid=137, compaction requested=true 2024-12-12T22:37:19,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:19,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:19,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-12-12T22:37:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=140 2024-12-12T22:37:19,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=139 2024-12-12T22:37:19,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=139, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3390 sec 2024-12-12T22:37:19,753 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=139, table=TestAcidGuarantees in 2.3570 sec 2024-12-12T22:37:19,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-12T22:37:19,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:19,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:19,805 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:19,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:19,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:19,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:19,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:19,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128274b58dc2074abc9730d795d0922413_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:19,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742373_1549 (size=12304) 2024-12-12T22:37:19,859 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:19,865 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412128274b58dc2074abc9730d795d0922413_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128274b58dc2074abc9730d795d0922413_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:19,875 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/14ff077e33d947afa5824c90296ed0c0, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:19,876 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/14ff077e33d947afa5824c90296ed0c0 is 175, key is test_row_0/A:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:19,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742374_1550 (size=31105) 2024-12-12T22:37:19,939 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/14ff077e33d947afa5824c90296ed0c0 2024-12-12T22:37:19,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043099942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043099942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043099953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043099965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:19,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043099967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:19,973 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/99c9949f5edd473297792560ed17d6b8 is 50, key is test_row_0/B:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742375_1551 (size=12151) 2024-12-12T22:37:20,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043100059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043100065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043100067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043100074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043100078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043100261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043100269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043100272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043100281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043100281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/99c9949f5edd473297792560ed17d6b8 2024-12-12T22:37:20,453 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/2c5232442a2f4ae194ea600fa7b0a8ba is 50, key is test_row_0/C:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:20,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742376_1552 (size=12151) 2024-12-12T22:37:20,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043100567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043100575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043100582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043100586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:20,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043100588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:20,923 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/2c5232442a2f4ae194ea600fa7b0a8ba 2024-12-12T22:37:20,975 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/14ff077e33d947afa5824c90296ed0c0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0 2024-12-12T22:37:21,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0, entries=150, sequenceid=157, filesize=30.4 K 2024-12-12T22:37:21,023 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/99c9949f5edd473297792560ed17d6b8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8 2024-12-12T22:37:21,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T22:37:21,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/2c5232442a2f4ae194ea600fa7b0a8ba as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba 2024-12-12T22:37:21,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba, entries=150, sequenceid=157, filesize=11.9 K 2024-12-12T22:37:21,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=107.34 KB/109920 for 487bd01b47fe4fd77cb2b4619f92faba in 1265ms, sequenceid=157, compaction requested=true 2024-12-12T22:37:21,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:21,071 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:21,071 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:21,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:21,072 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:21,072 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor 
compaction (all files) 2024-12-12T22:37:21,073 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:21,073 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8eeef63b1dc64f35bf8a6d04c3e5e667, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=47.4 K 2024-12-12T22:37:21,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:21,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:21,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:21,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=121.4 K 2024-12-12T22:37:21,073 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:21,073 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0] 2024-12-12T22:37:21,073 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 8eeef63b1dc64f35bf8a6d04c3e5e667, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:21,074 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6289983006be43d2af3e4b7f9b4a9259, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:21,074 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting fb0f842b824a492ab08933acb5cd66a9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734043037381 2024-12-12T22:37:21,077 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67953b459449417b86c44c4feb73b2f6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734043037381 2024-12-12T22:37:21,077 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 068dac28fed7497bb9404303cdf02265, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734043038102 2024-12-12T22:37:21,077 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 99c9949f5edd473297792560ed17d6b8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:21,078 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting f583be3b142041d28c829cc41fb3d304, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734043038102 2024-12-12T22:37:21,079 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14ff077e33d947afa5824c90296ed0c0, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:21,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:21,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-12T22:37:21,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO 
DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:21,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:21,100 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:21,114 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#468 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:21,114 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/69f6455fe8064e54be7d6f3e48814cb4 is 50, key is test_row_0/B:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:21,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043101123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043101126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043101129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043101129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043101130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,140 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412125f93eefcf2d44c82a8643d5620e86708_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:21,142 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412125f93eefcf2d44c82a8643d5620e86708_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:21,142 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125f93eefcf2d44c82a8643d5620e86708_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:21,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f45717fa5904ab5ab08822caa9edc2e_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043039946/Put/seqid=0 2024-12-12T22:37:21,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742377_1553 (size=12493) 2024-12-12T22:37:21,188 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/69f6455fe8064e54be7d6f3e48814cb4 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/69f6455fe8064e54be7d6f3e48814cb4 2024-12-12T22:37:21,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742379_1555 (size=12304) 2024-12-12T22:37:21,211 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:21,213 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 69f6455fe8064e54be7d6f3e48814cb4(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:21,213 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:21,213 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=12, startTime=1734043041071; duration=0sec 2024-12-12T22:37:21,213 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:21,213 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:21,213 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:21,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742378_1554 (size=4469) 2024-12-12T22:37:21,233 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:21,234 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:21,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,234 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:21,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043101230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,234 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/6568955b83c041d3b42ba0d86b144781, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=47.4 K 2024-12-12T22:37:21,237 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6568955b83c041d3b42ba0d86b144781, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1734043035651 2024-12-12T22:37:21,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043101236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043101237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043101240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,243 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 9acd8b7adb6647339617b85bea4a30cb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734043037381 2024-12-12T22:37:21,246 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting a5d9846006794b40953a46a370e7dd57, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1734043038102 2024-12-12T22:37:21,249 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c5232442a2f4ae194ea600fa7b0a8ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:21,251 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412123f45717fa5904ab5ab08822caa9edc2e_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f45717fa5904ab5ab08822caa9edc2e_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:21,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043101246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,259 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/cd2785bda46b4afd9a0cbbfa13490ae2, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:21,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/cd2785bda46b4afd9a0cbbfa13490ae2 is 175, key is test_row_0/A:col10/1734043039946/Put/seqid=0 2024-12-12T22:37:21,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742380_1556 (size=31105) 2024-12-12T22:37:21,317 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#470 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:21,317 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/106f38c0972149e6bf06f9b73f4f65ad is 50, key is test_row_0/C:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:21,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742381_1557 (size=12493) 2024-12-12T22:37:21,365 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/106f38c0972149e6bf06f9b73f4f65ad as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/106f38c0972149e6bf06f9b73f4f65ad 2024-12-12T22:37:21,375 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 106f38c0972149e6bf06f9b73f4f65ad(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:21,375 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:21,375 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=12, startTime=1734043041071; duration=0sec 2024-12-12T22:37:21,375 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:21,375 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:21,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043101436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043101444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043101448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043101451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043101461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=139 2024-12-12T22:37:21,512 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 139 completed 2024-12-12T22:37:21,524 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:21,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=141, table=TestAcidGuarantees 2024-12-12T22:37:21,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:21,536 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=141, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=141, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:21,538 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=141, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=141, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:21,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:21,616 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
487bd01b47fe4fd77cb2b4619f92faba#A#compaction#467 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:21,616 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/e61063bfe0844183b6b50b82c7099fca is 175, key is test_row_0/A:col10/1734043038798/Put/seqid=0 2024-12-12T22:37:21,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:21,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742382_1558 (size=31447) 2024-12-12T22:37:21,678 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/e61063bfe0844183b6b50b82c7099fca as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca 2024-12-12T22:37:21,689 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=142 2024-12-12T22:37:21,689 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into e61063bfe0844183b6b50b82c7099fca(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:21,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:21,689 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:21,689 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=12, startTime=1734043041070; duration=0sec 2024-12-12T22:37:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:21,690 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:21,690 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:21,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] handler.RSProcedureHandler(58): pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:21,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=142 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:21,712 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=177, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/cd2785bda46b4afd9a0cbbfa13490ae2 2024-12-12T22:37:21,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043101741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,746 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043101746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043101754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043101759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:21,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043101765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3ac647a4de0049e6ab768c1fc90194af is 50, key is test_row_0/B:col10/1734043039946/Put/seqid=0 2024-12-12T22:37:21,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742383_1559 (size=12151) 2024-12-12T22:37:21,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3ac647a4de0049e6ab768c1fc90194af 2024-12-12T22:37:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:21,850 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:21,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=142 2024-12-12T22:37:21,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:21,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:21,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:21,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=142}] handler.RSProcedureHandler(58): pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:21,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:21,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=142 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:21,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 is 50, key is test_row_0/C:col10/1734043039946/Put/seqid=0 2024-12-12T22:37:21,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742384_1560 (size=12151) 2024-12-12T22:37:21,916 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 2024-12-12T22:37:21,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/cd2785bda46b4afd9a0cbbfa13490ae2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2 2024-12-12T22:37:21,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2, entries=150, sequenceid=177, filesize=30.4 K 2024-12-12T22:37:21,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3ac647a4de0049e6ab768c1fc90194af as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af 2024-12-12T22:37:21,984 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af, entries=150, sequenceid=177, filesize=11.9 K 2024-12-12T22:37:21,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 2024-12-12T22:37:22,007 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,011 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=142 2024-12-12T22:37:22,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:22,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:22,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:22,012 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=142}] handler.RSProcedureHandler(58): pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:22,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=142 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:22,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=142 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:22,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5, entries=150, sequenceid=177, filesize=11.9 K 2024-12-12T22:37:22,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 487bd01b47fe4fd77cb2b4619f92faba in 951ms, sequenceid=177, compaction requested=false 2024-12-12T22:37:22,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:22,167 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=142 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:22,168 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:22,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:22,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212170dc4d9b9ce4fcbbfe8062ffc147406_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043041102/Put/seqid=0 2024-12-12T22:37:22,228 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742385_1561 (size=12304) 2024-12-12T22:37:22,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:22,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:22,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:22,252 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212170dc4d9b9ce4fcbbfe8062ffc147406_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212170dc4d9b9ce4fcbbfe8062ffc147406_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:22,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/71109f3ec11d41319db82e53cbaa82cf, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:22,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/71109f3ec11d41319db82e53cbaa82cf is 175, key is test_row_0/A:col10/1734043041102/Put/seqid=0 2024-12-12T22:37:22,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742386_1562 (size=31105) 2024-12-12T22:37:22,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043102287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043102287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,297 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043102292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043102292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043102296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043102396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043102398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043102401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043102407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043102408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043102598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043102602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043102604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043102616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043102629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:22,686 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/71109f3ec11d41319db82e53cbaa82cf 2024-12-12T22:37:22,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/713df1159a0a4f5b92f3e5cc48c9b137 is 50, key is test_row_0/B:col10/1734043041102/Put/seqid=0 2024-12-12T22:37:22,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742387_1563 (size=12151) 2024-12-12T22:37:22,763 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/713df1159a0a4f5b92f3e5cc48c9b137 2024-12-12T22:37:22,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/bfd14a35ed9d478ba5d0b1ffd9c19222 is 50, key is test_row_0/C:col10/1734043041102/Put/seqid=0 2024-12-12T22:37:22,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742388_1564 (size=12151) 2024-12-12T22:37:22,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043102904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043102906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043102921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043102923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:22,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:22,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043102948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,265 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/bfd14a35ed9d478ba5d0b1ffd9c19222 2024-12-12T22:37:23,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/71109f3ec11d41319db82e53cbaa82cf as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf 2024-12-12T22:37:23,288 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf, entries=150, sequenceid=197, filesize=30.4 K 2024-12-12T22:37:23,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/713df1159a0a4f5b92f3e5cc48c9b137 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137 2024-12-12T22:37:23,301 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137, entries=150, sequenceid=197, filesize=11.9 K 2024-12-12T22:37:23,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/bfd14a35ed9d478ba5d0b1ffd9c19222 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222 2024-12-12T22:37:23,317 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222, entries=150, sequenceid=197, filesize=11.9 K 2024-12-12T22:37:23,318 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 487bd01b47fe4fd77cb2b4619f92faba in 1150ms, sequenceid=197, compaction requested=true 2024-12-12T22:37:23,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:23,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:23,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-12T22:37:23,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-12T22:37:23,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-12T22:37:23,331 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7820 sec 2024-12-12T22:37:23,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=141, table=TestAcidGuarantees in 1.8070 sec 2024-12-12T22:37:23,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:23,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:23,414 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:23,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124ea0d1899bd143ae8de6cdde92ed0b29_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:23,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742389_1565 (size=14794) 2024-12-12T22:37:23,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043103448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,452 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043103448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043103452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043103450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043103453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043103553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043103553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043103554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043103567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-12T22:37:23,644 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 141 completed 2024-12-12T22:37:23,656 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:23,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=143, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees 2024-12-12T22:37:23,660 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=143, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:23,661 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=143, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:23,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:23,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:23,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043103756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043103756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043103756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:23,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:23,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043103783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-12T22:37:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:23,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:23,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:23,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:23,848 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:23,852 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412124ea0d1899bd143ae8de6cdde92ed0b29_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124ea0d1899bd143ae8de6cdde92ed0b29_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:23,853 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d2cf47705b4f45f19cb382bb33aaee9f, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:23,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d2cf47705b4f45f19cb382bb33aaee9f is 175, key is test_row_0/A:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742390_1566 (size=39749) 2024-12-12T22:37:23,888 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=219, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d2cf47705b4f45f19cb382bb33aaee9f 2024-12-12T22:37:23,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0abd28d87c2546c6ad5f6aa8a5566ebc is 50, key is test_row_0/B:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742391_1567 (size=12151) 2024-12-12T22:37:23,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:23,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=42.49 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0abd28d87c2546c6ad5f6aa8a5566ebc 2024-12-12T22:37:23,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:23,983 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-12T22:37:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:23,984 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] handler.RSProcedureHandler(58): pid=144 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:23,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=144 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:23,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=144 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:23,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0718b858e54340499c3d425c3a120495 is 50, key is test_row_0/C:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:24,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742392_1568 (size=12151) 2024-12-12T22:37:24,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0718b858e54340499c3d425c3a120495 2024-12-12T22:37:24,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d2cf47705b4f45f19cb382bb33aaee9f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f 2024-12-12T22:37:24,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043104060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043104064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043104068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f, entries=200, sequenceid=219, filesize=38.8 K 2024-12-12T22:37:24,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/0abd28d87c2546c6ad5f6aa8a5566ebc as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc 2024-12-12T22:37:24,081 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T22:37:24,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0718b858e54340499c3d425c3a120495 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495 2024-12-12T22:37:24,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495, entries=150, sequenceid=219, filesize=11.9 K 2024-12-12T22:37:24,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043104089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 487bd01b47fe4fd77cb2b4619f92faba in 686ms, sequenceid=219, compaction requested=true 2024-12-12T22:37:24,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:24,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:24,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:24,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:24,099 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:24,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:24,099 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:24,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:24,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:24,101 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133406 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:24,101 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:24,101 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:24,101 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=130.3 K 2024-12-12T22:37:24,101 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:24,101 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f] 2024-12-12T22:37:24,102 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting e61063bfe0844183b6b50b82c7099fca, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:24,102 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd2785bda46b4afd9a0cbbfa13490ae2, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734043039943 2024-12-12T22:37:24,102 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71109f3ec11d41319db82e53cbaa82cf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734043041102 2024-12-12T22:37:24,103 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d2cf47705b4f45f19cb382bb33aaee9f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:24,104 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:24,104 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:24,104 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:24,104 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/69f6455fe8064e54be7d6f3e48814cb4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=47.8 K 2024-12-12T22:37:24,104 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 69f6455fe8064e54be7d6f3e48814cb4, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:24,105 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ac647a4de0049e6ab768c1fc90194af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734043039943 2024-12-12T22:37:24,106 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 713df1159a0a4f5b92f3e5cc48c9b137, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734043041102 2024-12-12T22:37:24,106 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0abd28d87c2546c6ad5f6aa8a5566ebc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:24,128 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:24,132 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212470065ad33b94bf29d2953c83c093d39_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:24,133 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:24,133 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/c43d46d2a85e4bf4b7fff230c68e9348 is 50, key is test_row_0/B:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:24,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212470065ad33b94bf29d2953c83c093d39_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:24,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212470065ad33b94bf29d2953c83c093d39_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:24,146 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=144 2024-12-12T22:37:24,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:24,146 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:24,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:24,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742393_1569 (size=12629) 2024-12-12T22:37:24,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125bc83388cb9b4dfea9bd03efefc1dfb4_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043043444/Put/seqid=0 2024-12-12T22:37:24,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742394_1570 (size=4469) 2024-12-12T22:37:24,189 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#479 average throughput is 0.40 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:24,189 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/b296948fa4694a268d54be9b5a0e84db is 175, key is test_row_0/A:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:24,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742395_1571 (size=12304) 2024-12-12T22:37:24,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742396_1572 (size=31583) 2024-12-12T22:37:24,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:24,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:24,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043104566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043104566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043104566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043104588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043104592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,599 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/c43d46d2a85e4bf4b7fff230c68e9348 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/c43d46d2a85e4bf4b7fff230c68e9348 2024-12-12T22:37:24,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:24,621 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into c43d46d2a85e4bf4b7fff230c68e9348(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:24,621 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:24,621 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=12, startTime=1734043044099; duration=0sec 2024-12-12T22:37:24,621 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:24,621 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:24,621 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-12T22:37:24,621 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125bc83388cb9b4dfea9bd03efefc1dfb4_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125bc83388cb9b4dfea9bd03efefc1dfb4_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:24,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/26bcc0733632474ca5a92b6f3f63b278, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:24,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/26bcc0733632474ca5a92b6f3f63b278 is 175, key is test_row_0/A:col10/1734043043444/Put/seqid=0 2024-12-12T22:37:24,627 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-12T22:37:24,628 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:24,628 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:24,628 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/106f38c0972149e6bf06f9b73f4f65ad, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=47.8 K 2024-12-12T22:37:24,628 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 106f38c0972149e6bf06f9b73f4f65ad, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1734043038776 2024-12-12T22:37:24,631 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 79aa49b7c2db4fd1bfc84d1b8da97ad5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1734043039943 2024-12-12T22:37:24,636 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting bfd14a35ed9d478ba5d0b1ffd9c19222, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734043041102 2024-12-12T22:37:24,641 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0718b858e54340499c3d425c3a120495, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:24,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742397_1573 (size=31105) 2024-12-12T22:37:24,663 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/b296948fa4694a268d54be9b5a0e84db as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db 2024-12-12T22:37:24,670 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#482 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:24,671 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/c9c75977dd8144d7b2e99e447e48d248 is 50, key is test_row_0/C:col10/1734043042292/Put/seqid=0 2024-12-12T22:37:24,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043104669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:24,674 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into b296948fa4694a268d54be9b5a0e84db(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:24,674 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:24,674 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=12, startTime=1734043044099; duration=0sec 2024-12-12T22:37:24,674 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:24,674 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:24,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742398_1574 (size=12629) 2024-12-12T22:37:24,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:24,775 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/c9c75977dd8144d7b2e99e447e48d248 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c9c75977dd8144d7b2e99e447e48d248 2024-12-12T22:37:24,789 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into c9c75977dd8144d7b2e99e447e48d248(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:24,789 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:24,789 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=12, startTime=1734043044099; duration=0sec 2024-12-12T22:37:24,789 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:24,789 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:24,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:24,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043104878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,049 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/26bcc0733632474ca5a92b6f3f63b278 2024-12-12T22:37:25,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/643e14dbfe2a4c13a7a5064743d95dc3 is 50, key is test_row_0/B:col10/1734043043444/Put/seqid=0 2024-12-12T22:37:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742399_1575 (size=12151) 2024-12-12T22:37:25,085 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/643e14dbfe2a4c13a7a5064743d95dc3 2024-12-12T22:37:25,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0e1eb59567894093a58365763934de55 is 50, key is test_row_0/C:col10/1734043043444/Put/seqid=0 2024-12-12T22:37:25,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742400_1576 (size=12151) 2024-12-12T22:37:25,123 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0e1eb59567894093a58365763934de55 2024-12-12T22:37:25,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/26bcc0733632474ca5a92b6f3f63b278 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278 2024-12-12T22:37:25,144 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278, entries=150, sequenceid=233, filesize=30.4 K 2024-12-12T22:37:25,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/643e14dbfe2a4c13a7a5064743d95dc3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3 2024-12-12T22:37:25,175 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T22:37:25,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/0e1eb59567894093a58365763934de55 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55 2024-12-12T22:37:25,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043105184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,208 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55, entries=150, sequenceid=233, filesize=11.9 K 2024-12-12T22:37:25,209 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 487bd01b47fe4fd77cb2b4619f92faba in 1063ms, sequenceid=233, compaction requested=false 2024-12-12T22:37:25,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:25,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:25,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=144}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=144 2024-12-12T22:37:25,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=144 2024-12-12T22:37:25,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=144, resume processing ppid=143 2024-12-12T22:37:25,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, ppid=143, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5500 sec 2024-12-12T22:37:25,216 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=143, table=TestAcidGuarantees in 1.5590 sec 2024-12-12T22:37:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:25,589 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-12T22:37:25,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:25,589 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:25,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:25,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:25,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:25,590 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:25,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121278c4a7c7fce8497b878ad94e770f8f46_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:25,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043105627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043105631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043105632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043105638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742401_1577 (size=12354) 2024-12-12T22:37:25,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043105691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043105737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043105740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043105741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043105747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=143 2024-12-12T22:37:25,771 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 143 completed 2024-12-12T22:37:25,772 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:25,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees 2024-12-12T22:37:25,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:25,779 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=145, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:25,780 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=145, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:25,780 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:25,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:25,939 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:25,941 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:25,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:25,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043105941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043105943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043105945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:25,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:25,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043105955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,066 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:26,071 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121278c4a7c7fce8497b878ad94e770f8f46_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121278c4a7c7fce8497b878ad94e770f8f46_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:26,072 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/61703f2a91bc4438a64652d7c94f01f3, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/61703f2a91bc4438a64652d7c94f01f3 is 175, key is test_row_0/A:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:26,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:26,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742402_1578 (size=31155) 2024-12-12T22:37:26,103 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:26,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
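[Editor's aside] The MemStoreFlusher.0 entries just above show family A going through the MOB flush path: the flushed cells are written under mobdir/.tmp, the MOB file is renamed into mobdir/data/.../A, and only a small reference HFile lands in the region's .tmp/A directory via DefaultMobStoreFlusher. This is consistent with the test declaring family A as MOB-enabled while B and C stay as ordinary stores. A hedged sketch of how such a schema would be created is below; the threshold value is illustrative, not the test's actual setting.

    // Hypothetical sketch: a table whose family 'A' is MOB-enabled, so values above
    // the threshold are flushed into separate MOB files under mobdir/.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)      // route qualifying cells through the MOB store
              .setMobThreshold(100L)    // illustrative threshold in bytes
              .build();
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(mobFamily)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
              .build();
          admin.createTable(table);
        }
      }
    }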
2024-12-12T22:37:26,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:26,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:26,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043106247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043106252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043106252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,262 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:26,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043106263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,266 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:26,429 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:26,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:26,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:26,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,430 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:26,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
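[Editor's aside] The block above is another iteration of the same cycle: the master re-dispatches FlushRegionProcedure pid=146 to the regionserver, FlushRegionCallable finds the region "already flushing", fails with IOException "Unable to complete flush", and the master logs "Remote procedure failed" and schedules a further attempt. The procedure framework keeps retrying until the in-flight MemStoreFlusher run finishes, at which point pid=146 can start its own flush (visible further down, where it reports flushing all 3 column families). From a client's perspective this churn is hidden behind the admin flush call, which completes only when the table-level procedure does. A minimal sketch, assuming the standard Admin API:

    // Hypothetical sketch: requesting the table flush that procedures like 143/145 carry out.
    // Admin.flush returns once the table flush procedure (and its per-region children)
    // reports completion, retries included.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous from the caller's point of view; the master drives the
          // procedure and re-sends the region callable until it succeeds.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }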
2024-12-12T22:37:26,503 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=260, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/61703f2a91bc4438a64652d7c94f01f3 2024-12-12T22:37:26,532 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/e2683a2640d840acaa07b24d5a7933ef is 50, key is test_row_0/B:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:26,582 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:26,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:26,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] handler.RSProcedureHandler(58): pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=146 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:26,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=146 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:26,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742403_1579 (size=12201) 2024-12-12T22:37:26,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/e2683a2640d840acaa07b24d5a7933ef 2024-12-12T22:37:26,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e94070d70266468a97b033622b1239be is 50, key is test_row_0/C:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:26,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742404_1580 (size=12201) 2024-12-12T22:37:26,651 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e94070d70266468a97b033622b1239be 2024-12-12T22:37:26,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/61703f2a91bc4438a64652d7c94f01f3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3 2024-12-12T22:37:26,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3, entries=150, sequenceid=260, filesize=30.4 K 2024-12-12T22:37:26,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/e2683a2640d840acaa07b24d5a7933ef as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef 2024-12-12T22:37:26,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043106697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef, entries=150, sequenceid=260, filesize=11.9 K 2024-12-12T22:37:26,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/e94070d70266468a97b033622b1239be as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be 2024-12-12T22:37:26,739 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be, entries=150, sequenceid=260, filesize=11.9 K 2024-12-12T22:37:26,742 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,743 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 487bd01b47fe4fd77cb2b4619f92faba in 1153ms, sequenceid=260, compaction requested=true 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:26,743 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:26,743 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:26,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=146 2024-12-12T22:37:26,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:26,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,743 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:26,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:26,745 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36981 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:26,745 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:26,745 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in 
TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,746 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/c43d46d2a85e4bf4b7fff230c68e9348, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.1 K 2024-12-12T22:37:26,746 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93843 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:26,746 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:26,746 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,746 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=91.6 K 2024-12-12T22:37:26,746 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,746 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3] 2024-12-12T22:37:26,747 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b296948fa4694a268d54be9b5a0e84db, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:26,748 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c43d46d2a85e4bf4b7fff230c68e9348, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:26,748 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26bcc0733632474ca5a92b6f3f63b278, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734043043444 2024-12-12T22:37:26,748 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 643e14dbfe2a4c13a7a5064743d95dc3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734043043444 2024-12-12T22:37:26,748 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61703f2a91bc4438a64652d7c94f01f3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:26,755 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e2683a2640d840acaa07b24d5a7933ef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:26,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212162936491f8a486b8ad6cf2d1787dcf3_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043045624/Put/seqid=0 2024-12-12T22:37:26,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:26,776 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,785 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:26,785 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/b129c36064c94a99ba6e379feebdd875 is 50, key is test_row_0/B:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:26,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742405_1581 (size=12454) 2024-12-12T22:37:26,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:26,812 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412122b9a40eb11ee4d66a0e7f9b5217f79be_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,813 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412122b9a40eb11ee4d66a0e7f9b5217f79be_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,814 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412122b9a40eb11ee4d66a0e7f9b5217f79be_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,832 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212162936491f8a486b8ad6cf2d1787dcf3_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212162936491f8a486b8ad6cf2d1787dcf3_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:26,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742406_1582 (size=12781) 2024-12-12T22:37:26,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d1f7aaee84a94902a6676a90831102c2, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:26,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d1f7aaee84a94902a6676a90831102c2 is 175, key is test_row_0/A:col10/1734043045624/Put/seqid=0 2024-12-12T22:37:26,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742407_1583 (size=4469) 2024-12-12T22:37:26,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742408_1584 (size=31255) 2024-12-12T22:37:26,853 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=273, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d1f7aaee84a94902a6676a90831102c2 2024-12-12T22:37:26,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043106887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043106888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043106891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043106892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,907 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/b129c36064c94a99ba6e379feebdd875 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b129c36064c94a99ba6e379feebdd875 2024-12-12T22:37:26,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/b95b2c6196fe48209fb8c495e50b4f64 is 50, key is test_row_0/B:col10/1734043045624/Put/seqid=0 2024-12-12T22:37:26,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742409_1585 (size=12301) 2024-12-12T22:37:26,938 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into b129c36064c94a99ba6e379feebdd875(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:26,939 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:26,939 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043046743; duration=0sec 2024-12-12T22:37:26,939 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:26,939 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:26,939 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:26,951 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36981 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:26,951 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:26,951 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:26,951 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c9c75977dd8144d7b2e99e447e48d248, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.1 K 2024-12-12T22:37:26,956 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c9c75977dd8144d7b2e99e447e48d248, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1734043042286 2024-12-12T22:37:26,957 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e1eb59567894093a58365763934de55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734043043444 2024-12-12T22:37:26,959 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting e94070d70266468a97b033622b1239be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:26,979 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
487bd01b47fe4fd77cb2b4619f92faba#C#compaction#492 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:26,980 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/5f6cd44feba54f57aef6e7f6c7a5e0b7 is 50, key is test_row_0/C:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:26,995 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043106992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043106997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043106998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:26,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:26,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043106998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742410_1586 (size=12781) 2024-12-12T22:37:27,047 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/5f6cd44feba54f57aef6e7f6c7a5e0b7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5f6cd44feba54f57aef6e7f6c7a5e0b7 2024-12-12T22:37:27,085 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 5f6cd44feba54f57aef6e7f6c7a5e0b7(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:27,085 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:27,085 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043046743; duration=0sec 2024-12-12T22:37:27,085 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:27,085 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:27,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043107199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043107199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043107200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043107202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,259 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#489 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:27,260 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/162cabfee7be49abb6325c54c8539ec5 is 175, key is test_row_0/A:col10/1734043044563/Put/seqid=0 2024-12-12T22:37:27,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742411_1587 (size=31735) 2024-12-12T22:37:27,337 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/162cabfee7be49abb6325c54c8539ec5 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5 2024-12-12T22:37:27,339 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/b95b2c6196fe48209fb8c495e50b4f64 2024-12-12T22:37:27,387 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into 162cabfee7be49abb6325c54c8539ec5(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:27,387 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:27,387 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043046743; duration=0sec 2024-12-12T22:37:27,387 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:27,387 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:27,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/ed26d6bc36fe4472b55669f16e7c1a23 is 50, key is test_row_0/C:col10/1734043045624/Put/seqid=0 2024-12-12T22:37:27,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742412_1588 (size=12301) 2024-12-12T22:37:27,486 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/ed26d6bc36fe4472b55669f16e7c1a23 2024-12-12T22:37:27,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043107503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043107506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043107511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/d1f7aaee84a94902a6676a90831102c2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2 2024-12-12T22:37:27,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:27,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043107511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:27,524 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2, entries=150, sequenceid=273, filesize=30.5 K 2024-12-12T22:37:27,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/b95b2c6196fe48209fb8c495e50b4f64 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64 2024-12-12T22:37:27,538 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T22:37:27,543 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/ed26d6bc36fe4472b55669f16e7c1a23 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23 2024-12-12T22:37:27,568 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23, entries=150, sequenceid=273, filesize=12.0 K 2024-12-12T22:37:27,570 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 487bd01b47fe4fd77cb2b4619f92faba in 827ms, 
sequenceid=273, compaction requested=false 2024-12-12T22:37:27,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:27,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:27,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-12T22:37:27,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-12T22:37:27,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-12T22:37:27,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8030 sec 2024-12-12T22:37:27,589 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=145, table=TestAcidGuarantees in 1.8160 sec 2024-12-12T22:37:27,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-12T22:37:27,888 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-12T22:37:27,893 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:27,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees 2024-12-12T22:37:27,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:27,908 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=147, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:27,918 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=147, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:27,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:28,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:28,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 
487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-12T22:37:28,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:28,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:28,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:28,021 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043108035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043108038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043108046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043108047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212402c2e4dcc814f47bd6dfafb112cc22f_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:28,094 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:28,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:28,097 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742413_1589 (size=17534) 2024-12-12T22:37:28,131 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:28,149 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043108148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043108154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043108154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,164 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212402c2e4dcc814f47bd6dfafb112cc22f_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212402c2e4dcc814f47bd6dfafb112cc22f_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:28,179 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/446100dfd85b4c68966496f92fab60de, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:28,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/446100dfd85b4c68966496f92fab60de is 175, key is test_row_0/A:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:28,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043108203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:28,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742414_1590 (size=48639) 2024-12-12T22:37:28,227 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/446100dfd85b4c68966496f92fab60de 2024-12-12T22:37:28,271 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:28,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,281 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3e095b716f9248e9af8b120e13ba10a9 is 50, key is test_row_0/B:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742415_1591 (size=12301) 2024-12-12T22:37:28,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3e095b716f9248e9af8b120e13ba10a9 2024-12-12T22:37:28,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043108358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043108359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/887c384651a94a9780f838bc86535f69 is 50, key is test_row_0/C:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:28,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043108359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742416_1592 (size=12301) 2024-12-12T22:37:28,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043108414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:28,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:28,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:28,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:28,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043108663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,666 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043108663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043108665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043108708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,713 DEBUG [Thread-2295 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., hostname=1aef280cf0a8,36025,1734042873576, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor37.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-12T22:37:28,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:28,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043108722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:28,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] handler.RSProcedureHandler(58): pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=148 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=148 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:28,790 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/887c384651a94a9780f838bc86535f69 2024-12-12T22:37:28,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/446100dfd85b4c68966496f92fab60de as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de 2024-12-12T22:37:28,867 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de, entries=250, sequenceid=303, filesize=47.5 K 2024-12-12T22:37:28,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/3e095b716f9248e9af8b120e13ba10a9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9 2024-12-12T22:37:28,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9, entries=150, 
sequenceid=303, filesize=12.0 K 2024-12-12T22:37:28,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/887c384651a94a9780f838bc86535f69 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69 2024-12-12T22:37:28,929 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69, entries=150, sequenceid=303, filesize=12.0 K 2024-12-12T22:37:28,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 487bd01b47fe4fd77cb2b4619f92faba in 915ms, sequenceid=303, compaction requested=true 2024-12-12T22:37:28,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:28,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:28,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:28,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:28,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:37:28,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:28,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:37:28,935 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:28,935 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:28,936 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37383 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:28,936 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111629 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:28,936 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] 
regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:28,936 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:28,937 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,937 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,937 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=109.0 K 2024-12-12T22:37:28,937 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5f6cd44feba54f57aef6e7f6c7a5e0b7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.5 K 2024-12-12T22:37:28,937 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:28,937 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de] 2024-12-12T22:37:28,937 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f6cd44feba54f57aef6e7f6c7a5e0b7, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:28,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:28,937 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 162cabfee7be49abb6325c54c8539ec5, keycount=150, bloomtype=ROW, size=31.0 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:28,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=148 2024-12-12T22:37:28,940 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed26d6bc36fe4472b55669f16e7c1a23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734043045605 2024-12-12T22:37:28,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:28,941 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:28,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:28,943 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting d1f7aaee84a94902a6676a90831102c2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734043045605 2024-12-12T22:37:28,944 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 887c384651a94a9780f838bc86535f69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:28,946 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 446100dfd85b4c68966496f92fab60de, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:28,977 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:28,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129a3a1f5f78b148df9ad96d8c15bc3f1f_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043048035/Put/seqid=0 2024-12-12T22:37:28,984 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:28,984 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/98ff1132eaee475cb0538e5595dbc736 is 50, key is test_row_0/C:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:28,996 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121270d73e33c0274fcfae169fd48d751860_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:28,997 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121270d73e33c0274fcfae169fd48d751860_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:28,998 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121270d73e33c0274fcfae169fd48d751860_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:29,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742417_1593 (size=12454) 2024-12-12T22:37:29,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:29,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:29,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742418_1594 (size=12983) 2024-12-12T22:37:29,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742419_1595 (size=4469) 2024-12-12T22:37:29,041 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#498 average throughput is 0.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:29,041 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/eee9a3078577493eab3a62ee4132f712 is 175, key is test_row_0/A:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:29,059 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129a3a1f5f78b148df9ad96d8c15bc3f1f_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a3a1f5f78b148df9ad96d8c15bc3f1f_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:29,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742420_1596 (size=31937) 2024-12-12T22:37:29,076 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/98ff1132eaee475cb0538e5595dbc736 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/98ff1132eaee475cb0538e5595dbc736 2024-12-12T22:37:29,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/c220ff328c414bf9866460a1971ead60, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:29,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/c220ff328c414bf9866460a1971ead60 is 175, key is test_row_0/A:col10/1734043048035/Put/seqid=0 2024-12-12T22:37:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742421_1597 (size=31255) 2024-12-12T22:37:29,135 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 98ff1132eaee475cb0538e5595dbc736(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:29,135 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:29,136 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043048935; duration=0sec 2024-12-12T22:37:29,136 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:29,136 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:29,136 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:29,141 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/eee9a3078577493eab3a62ee4132f712 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712 2024-12-12T22:37:29,148 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37383 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:29,148 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:29,148 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:29,148 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b129c36064c94a99ba6e379feebdd875, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.5 K 2024-12-12T22:37:29,148 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b129c36064c94a99ba6e379feebdd875, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1734043044554 2024-12-12T22:37:29,150 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b95b2c6196fe48209fb8c495e50b4f64, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1734043045605 2024-12-12T22:37:29,152 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e095b716f9248e9af8b120e13ba10a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:29,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:29,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:29,187 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into eee9a3078577493eab3a62ee4132f712(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:29,187 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:29,187 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043048934; duration=0sec 2024-12-12T22:37:29,187 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:29,187 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:29,215 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:29,216 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/41b3e841fa664664bf28924aad9eba50 is 50, key is test_row_0/B:col10/1734043048011/Put/seqid=0 2024-12-12T22:37:29,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742422_1598 (size=12983) 2024-12-12T22:37:29,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043109267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043109279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043109282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043109287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043109391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043109392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043109394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,405 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043109403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,530 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/c220ff328c414bf9866460a1971ead60 2024-12-12T22:37:29,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8582cd52bdb34863af3b915ce6e7cc87 is 50, key is test_row_0/B:col10/1734043048035/Put/seqid=0 2024-12-12T22:37:29,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043109600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043109600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043109608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043109616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742423_1599 (size=12301) 2024-12-12T22:37:29,643 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8582cd52bdb34863af3b915ce6e7cc87 2024-12-12T22:37:29,687 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/41b3e841fa664664bf28924aad9eba50 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/41b3e841fa664664bf28924aad9eba50 2024-12-12T22:37:29,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/c48d5a96f6d04c37afb55936544149e3 is 50, key is test_row_0/C:col10/1734043048035/Put/seqid=0 2024-12-12T22:37:29,715 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 41b3e841fa664664bf28924aad9eba50(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
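Note: the repeated RegionTooBusyException warnings in the entries above are the region server's write backpressure: once the region's memstore exceeds its blocking limit (512.0 K in this run), HRegion.checkResources rejects further mutations until the in-flight flush drains the memstore. The blocking limit is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below is a minimal, hypothetical client-side writer (not part of this test) that retries a put when the exception surfaces; it assumes client retries are turned down (hbase.client.retries.number set low), since by default the HBase client retries this exception internally. Table, row, family and qualifier names simply mirror the rows seen in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureAwareWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    // May be rejected while the memstore is over its blocking limit.
                    table.put(put);
                    break;
                } catch (RegionTooBusyException e) {
                    // The server is flushing; back off and retry instead of failing the write.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
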
2024-12-12T22:37:29,715 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:29,715 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043048934; duration=0sec 2024-12-12T22:37:29,715 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:29,715 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:29,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742424_1600 (size=12301) 2024-12-12T22:37:29,743 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/c48d5a96f6d04c37afb55936544149e3 2024-12-12T22:37:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/c220ff328c414bf9866460a1971ead60 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60 2024-12-12T22:37:29,751 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60, entries=150, sequenceid=313, filesize=30.5 K 2024-12-12T22:37:29,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8582cd52bdb34863af3b915ce6e7cc87 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87 2024-12-12T22:37:29,756 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87, entries=150, sequenceid=313, filesize=12.0 K 2024-12-12T22:37:29,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/c48d5a96f6d04c37afb55936544149e3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3 2024-12-12T22:37:29,764 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3, entries=150, sequenceid=313, filesize=12.0 K 2024-12-12T22:37:29,776 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 487bd01b47fe4fd77cb2b4619f92faba in 835ms, sequenceid=313, compaction requested=false 2024-12-12T22:37:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:29,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-12-12T22:37:29,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=148 2024-12-12T22:37:29,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-12T22:37:29,793 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8650 sec 2024-12-12T22:37:29,810 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=147, table=TestAcidGuarantees in 1.9060 sec 2024-12-12T22:37:29,912 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:37:29,912 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:29,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:29,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:29,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:29,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:29,913 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-12T22:37:29,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:29,941 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d36f3f19a4454d40a28611796ca1ded0_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:29,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043109939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043109944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043109942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:29,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043109947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742425_1601 (size=14994) 2024-12-12T22:37:29,965 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:29,969 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d36f3f19a4454d40a28611796ca1ded0_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d36f3f19a4454d40a28611796ca1ded0_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:29,974 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a9dbcb5bbf4d4504bb97d24b6907dd17, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:29,974 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a9dbcb5bbf4d4504bb97d24b6907dd17 is 175, key is test_row_0/A:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:29,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742426_1602 (size=39949) 2024-12-12T22:37:30,005 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=53.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a9dbcb5bbf4d4504bb97d24b6907dd17 2024-12-12T22:37:30,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-12T22:37:30,018 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 147 completed 2024-12-12T22:37:30,020 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:30,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees 2024-12-12T22:37:30,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:30,043 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:30,048 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=149, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:30,048 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:30,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043110051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043110051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043110051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043110054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/6636ecce53fa4e6fa06382ee214563b2 is 50, key is test_row_0/B:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:30,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742427_1603 (size=12301) 2024-12-12T22:37:30,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/6636ecce53fa4e6fa06382ee214563b2 2024-12-12T22:37:30,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:30,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/796356826e624244a40a5290a4021892 is 50, key is test_row_0/C:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:30,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742428_1604 (size=12301) 2024-12-12T22:37:30,205 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T22:37:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
as already flushing 2024-12-12T22:37:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043110254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043110254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043110256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043110260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:30,363 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T22:37:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
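Note: the pid=150 FlushRegionProcedure failures above are expected while the memstore-pressure flush (MemStoreFlusher.0) is still running: FlushRegionCallable reports "NOT flushing ... as already flushing", the region server returns the IOException to the master, the master logs "Remote procedure failed, pid=150" and re-dispatches the callable until the region can flush. The table-level flush itself was requested through the admin API, as with procId 147 earlier ("Operation: FLUSH, Table Name: default:TestAcidGuarantees ... completed"). A minimal, hypothetical sketch of issuing such a flush from a client follows; only the table name is taken from the log, the rest is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequester {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to run a FlushTableProcedure for the table; the call
            // waits for the procedure to finish, even if individual region flushes
            // are retried because the region is "already flushing".
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
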
2024-12-12T22:37:30,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T22:37:30,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:30,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,531 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043110564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043110567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043110569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:30,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043110571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/796356826e624244a40a5290a4021892 2024-12-12T22:37:30,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/a9dbcb5bbf4d4504bb97d24b6907dd17 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17 2024-12-12T22:37:30,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:30,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17, entries=200, sequenceid=343, filesize=39.0 K 2024-12-12T22:37:30,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/6636ecce53fa4e6fa06382ee214563b2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2 2024-12-12T22:37:30,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2, entries=150, sequenceid=343, filesize=12.0 K 2024-12-12T22:37:30,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/796356826e624244a40a5290a4021892 as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892 2024-12-12T22:37:30,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T22:37:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] handler.RSProcedureHandler(58): pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=150 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:30,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=150 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
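
The cycle above repeats for procedure pid=150: the master re-dispatches the flush, FlushRegionCallable finds the region already flushing ("NOT flushing ... as already flushing"), raises IOException: Unable to complete flush, and HMaster records the remote failure before retrying. The sketch below is not part of the test; assuming a standard HBase 2.x client setup, it only illustrates the public Admin call that drives this kind of server-side flush procedure, with what appears to be the client-side wait showing up in this log as the periodic "Checking to see if procedure is done pid=149" lines.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch only (not taken from the test sources): ask the cluster to
// flush every region of the table. In this log the request reaches the region
// server as FlushRegionCallable under procedure pid=150.
public final class FlushTestAcidGuarantees {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
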
2024-12-12T22:37:30,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892, entries=150, sequenceid=343, filesize=12.0 K 2024-12-12T22:37:30,719 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=53.67 KB/54960 for 487bd01b47fe4fd77cb2b4619f92faba in 807ms, sequenceid=343, compaction requested=true 2024-12-12T22:37:30,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:30,720 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:30,727 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:30,728 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:30,728 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:30,731 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:30,731 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:30,731 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:30,731 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=100.7 K 2024-12-12T22:37:30,731 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:30,731 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17] 2024-12-12T22:37:30,736 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting eee9a3078577493eab3a62ee4132f712, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:30,739 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting c220ff328c414bf9866460a1971ead60, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1734043048035 2024-12-12T22:37:30,740 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:30,740 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:30,740 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:30,740 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/41b3e841fa664664bf28924aad9eba50, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.7 K 2024-12-12T22:37:30,743 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 41b3e841fa664664bf28924aad9eba50, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:30,743 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9dbcb5bbf4d4504bb97d24b6907dd17, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:30,747 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 8582cd52bdb34863af3b915ce6e7cc87, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1734043048035 2024-12-12T22:37:30,752 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 6636ecce53fa4e6fa06382ee214563b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:30,791 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:30,792 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/71a8776895b545a19a7d1421108c3353 is 50, key is test_row_0/B:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:30,802 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:30,808 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212bb4360f243e14a67a21a264f64fa7eb4_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:30,810 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212bb4360f243e14a67a21a264f64fa7eb4_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:30,810 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212bb4360f243e14a67a21a264f64fa7eb4_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:30,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742430_1606 (size=4469) 2024-12-12T22:37:30,819 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#507 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:30,820 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/2a7a4231c7fe4d508af9ecb8f9eab552 is 175, key is test_row_0/A:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:30,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742429_1605 (size=13085) 2024-12-12T22:37:30,832 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/71a8776895b545a19a7d1421108c3353 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/71a8776895b545a19a7d1421108c3353 2024-12-12T22:37:30,837 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 71a8776895b545a19a7d1421108c3353(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:30,837 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:30,837 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043050728; duration=0sec 2024-12-12T22:37:30,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742431_1607 (size=32039) 2024-12-12T22:37:30,837 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:30,837 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:30,837 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:30,838 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:30,839 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:30,839 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:30,839 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/98ff1132eaee475cb0538e5595dbc736, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.7 K 2024-12-12T22:37:30,839 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 98ff1132eaee475cb0538e5595dbc736, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1734043046878 2024-12-12T22:37:30,840 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting c48d5a96f6d04c37afb55936544149e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1734043048035 2024-12-12T22:37:30,840 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 796356826e624244a40a5290a4021892, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:30,853 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/2a7a4231c7fe4d508af9ecb8f9eab552 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552 2024-12-12T22:37:30,853 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:30,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=150 2024-12-12T22:37:30,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:30,855 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:37:30,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:30,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:30,859 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#508 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:30,859 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into 2a7a4231c7fe4d508af9ecb8f9eab552(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:30,859 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:30,859 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043050720; duration=0sec 2024-12-12T22:37:30,859 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:30,859 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:30,860 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/49e1f3777ee24a999060461f358286f9 is 50, key is test_row_0/C:col10/1734043049260/Put/seqid=0 2024-12-12T22:37:30,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212438e2f7686274060954e52d6dca635d6_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043049934/Put/seqid=0 2024-12-12T22:37:30,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742432_1608 (size=13085) 2024-12-12T22:37:30,887 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/49e1f3777ee24a999060461f358286f9 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/49e1f3777ee24a999060461f358286f9 2024-12-12T22:37:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742433_1609 (size=12454) 2024-12-12T22:37:30,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:30,894 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 49e1f3777ee24a999060461f358286f9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
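
At this point all three stores of 487bd01b47fe4fd77cb2b4619f92faba have been minor-compacted: ExploringCompactionPolicy selected the three eligible HFiles per store and rewrote them into a single file each (71a8776895b545a19a7d1421108c3353 for B, 2a7a4231c7fe4d508af9ecb8f9eab552 for A, 49e1f3777ee24a999060461f358286f9 for C). These compactions were queued automatically by MemStoreFlusher after the flush; the snippet below is only a hedged illustration of requesting the same work explicitly through the Admin API (the table and family names come from this log, everything else is assumed).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged illustration: explicitly queueing the kind of compaction the CompactSplit
// threads above scheduled automatically once the flush completed.
public final class CompactTestAcidGuarantees {
  static void requestCompactions(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    admin.compact(table, Bytes.toBytes("A"));   // minor compaction request for store A
    admin.compact(table, Bytes.toBytes("B"));   // and for store B
    admin.compact(table, Bytes.toBytes("C"));   // and for store C
    // admin.majorCompact(table);               // alternatively rewrite all store files
  }
}
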
2024-12-12T22:37:30,894 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:30,894 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043050728; duration=0sec 2024-12-12T22:37:30,894 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:30,894 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:30,902 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212438e2f7686274060954e52d6dca635d6_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212438e2f7686274060954e52d6dca635d6_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:30,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4db45c76a0194083952e154f4e112b91, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:30,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4db45c76a0194083952e154f4e112b91 is 175, key is test_row_0/A:col10/1734043049934/Put/seqid=0 2024-12-12T22:37:30,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742434_1610 (size=31255) 2024-12-12T22:37:30,936 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4db45c76a0194083952e154f4e112b91 2024-12-12T22:37:30,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/741859aca9ce4650881b868579aa0a93 is 50, key is test_row_0/B:col10/1734043049934/Put/seqid=0 2024-12-12T22:37:30,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43795 is added to blk_1073742435_1611 (size=12301) 2024-12-12T22:37:31,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:31,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:31,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043111107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043111110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043111110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043111114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:31,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043111213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043111215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043111218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043111220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,235 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-12T22:37:31,374 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/741859aca9ce4650881b868579aa0a93 2024-12-12T22:37:31,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b987d7bbfbce4620a86ee1d956442e3a is 50, key is test_row_0/C:col10/1734043049934/Put/seqid=0 2024-12-12T22:37:31,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043111416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043111421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043111428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043111431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742436_1612 (size=12301) 2024-12-12T22:37:31,464 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b987d7bbfbce4620a86ee1d956442e3a 2024-12-12T22:37:31,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4db45c76a0194083952e154f4e112b91 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91 2024-12-12T22:37:31,527 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91, entries=150, sequenceid=355, filesize=30.5 K 2024-12-12T22:37:31,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/741859aca9ce4650881b868579aa0a93 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93 
2024-12-12T22:37:31,539 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93, entries=150, sequenceid=355, filesize=12.0 K 2024-12-12T22:37:31,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b987d7bbfbce4620a86ee1d956442e3a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a 2024-12-12T22:37:31,545 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a, entries=150, sequenceid=355, filesize=12.0 K 2024-12-12T22:37:31,555 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 487bd01b47fe4fd77cb2b4619f92faba in 700ms, sequenceid=355, compaction requested=false 2024-12-12T22:37:31,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:31,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:31,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=150}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=150 2024-12-12T22:37:31,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=150 2024-12-12T22:37:31,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-12T22:37:31,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5090 sec 2024-12-12T22:37:31,560 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=149, table=TestAcidGuarantees in 1.5390 sec 2024-12-12T22:37:31,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:31,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:31,734 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:31,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043111741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043111755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043111757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127731449584fd4a3baeee0fa4e6cadf71_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:31,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043111759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742437_1613 (size=12454) 2024-12-12T22:37:31,800 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,808 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412127731449584fd4a3baeee0fa4e6cadf71_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127731449584fd4a3baeee0fa4e6cadf71_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:31,810 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/9e12389697cc4f14ad6eba95633ac15a, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:31,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/9e12389697cc4f14ad6eba95633ac15a is 175, key is test_row_0/A:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:31,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742438_1614 (size=31255) 2024-12-12T22:37:31,831 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=384, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/9e12389697cc4f14ad6eba95633ac15a 2024-12-12T22:37:31,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/65bd4e88e96246309b41d438ddeadbc3 is 50, key is 
test_row_0/B:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:31,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742439_1615 (size=12301) 2024-12-12T22:37:31,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043111864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043111864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/65bd4e88e96246309b41d438ddeadbc3 2024-12-12T22:37:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043111865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:31,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043111867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:31,920 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/d7eeca17c651425eaac8f1abf843de15 is 50, key is test_row_0/C:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:31,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742440_1616 (size=12301) 2024-12-12T22:37:31,936 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=384 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/d7eeca17c651425eaac8f1abf843de15 2024-12-12T22:37:31,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/9e12389697cc4f14ad6eba95633ac15a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a 2024-12-12T22:37:31,948 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a, entries=150, sequenceid=384, filesize=30.5 K 2024-12-12T22:37:31,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/65bd4e88e96246309b41d438ddeadbc3 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3 2024-12-12T22:37:31,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3, entries=150, sequenceid=384, filesize=12.0 K 2024-12-12T22:37:31,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/d7eeca17c651425eaac8f1abf843de15 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15 2024-12-12T22:37:31,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,963 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15, entries=150, sequenceid=384, filesize=12.0 K 2024-12-12T22:37:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-12T22:37:31,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 487bd01b47fe4fd77cb2b4619f92faba in 239ms, sequenceid=384, compaction requested=true 2024-12-12T22:37:31,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:31,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:31,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:31,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:31,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-12T22:37:31,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:31,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-12-12T22:37:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,969 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:31,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,970 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,979 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:31,979 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:31,979 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:31,979 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=92.3 K 2024-12-12T22:37:31,979 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:31,979 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a] 2024-12-12T22:37:31,980 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a7a4231c7fe4d508af9ecb8f9eab552, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:31,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,990 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 4db45c76a0194083952e154f4e112b91, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734043049931 2024-12-12T22:37:31,991 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:31,991 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:31,991 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e12389697cc4f14ad6eba95633ac15a, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:31,991 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:31,991 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/49e1f3777ee24a999060461f358286f9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.8 K 2024-12-12T22:37:31,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,995 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49e1f3777ee24a999060461f358286f9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:31,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:31,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,003 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting b987d7bbfbce4620a86ee1d956442e3a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734043049931 2024-12-12T22:37:32,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,007 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7eeca17c651425eaac8f1abf843de15, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:32,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,040 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,042 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#C#compaction#515 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:32,042 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/552fd15a1744444d958bbf791a61843f is 50, key is test_row_0/C:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:32,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,055 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241212f67d40173a874fd2bf5add3b15a09f7a_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,056 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241212f67d40173a874fd2bf5add3b15a09f7a_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,057 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212f67d40173a874fd2bf5add3b15a09f7a_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:32,077 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:32,077 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:32,093 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129493f28f3d804a968da2a6506f346000_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043052074/Put/seqid=0 2024-12-12T22:37:32,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742442_1618 (size=4469) 2024-12-12T22:37:32,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742441_1617 (size=13187) 2024-12-12T22:37:32,108 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#516 average throughput is 0.36 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:32,108 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/6c67137d63cf4a3895892097805d6b44 is 175, key is test_row_0/A:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:32,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742443_1619 (size=12454) 2024-12-12T22:37:32,116 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,119 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/552fd15a1744444d958bbf791a61843f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/552fd15a1744444d958bbf791a61843f 2024-12-12T22:37:32,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043112120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742444_1620 (size=32141) 2024-12-12T22:37:32,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043112121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,128 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412129493f28f3d804a968da2a6506f346000_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129493f28f3d804a968da2a6506f346000_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:32,129 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/918289933b8740aca956278446a56295, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/918289933b8740aca956278446a56295 is 175, key is test_row_0/A:col10/1734043052074/Put/seqid=0 2024-12-12T22:37:32,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043112127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,131 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 552fd15a1744444d958bbf791a61843f(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:32,131 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:32,131 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043051968; duration=0sec 2024-12-12T22:37:32,131 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:32,131 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:32,131 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:32,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:32,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:32,133 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:32,133 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/71a8776895b545a19a7d1421108c3353, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.8 K 2024-12-12T22:37:32,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71a8776895b545a19a7d1421108c3353, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1734043049260 2024-12-12T22:37:32,133 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 741859aca9ce4650881b868579aa0a93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1734043049931 2024-12-12T22:37:32,134 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 65bd4e88e96246309b41d438ddeadbc3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:32,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-12-12T22:37:32,137 INFO [Thread-2297 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-12-12T22:37:32,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043112135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,140 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-12T22:37:32,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees 2024-12-12T22:37:32,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:32,142 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-12T22:37:32,143 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=151, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-12T22:37:32,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-12T22:37:32,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742445_1621 (size=31255) 2024-12-12T22:37:32,155 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#518 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:32,155 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/1ecb459a5a4347e4b31b894655f00069 is 50, key is test_row_0/B:col10/1734043051720/Put/seqid=0 2024-12-12T22:37:32,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742446_1622 (size=13187) 2024-12-12T22:37:32,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043112231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043112231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043112235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043112244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:32,303 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:32,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043112435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043112439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043112447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:32,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043112451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,466 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,467 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:32,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:32,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,553 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=395, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/918289933b8740aca956278446a56295 2024-12-12T22:37:32,554 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/6c67137d63cf4a3895892097805d6b44 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44 2024-12-12T22:37:32,577 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into 6c67137d63cf4a3895892097805d6b44(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:32,577 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:32,577 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043051967; duration=0sec 2024-12-12T22:37:32,577 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:32,577 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:32,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8bb20119d4d247ea967b469b9c43826d is 50, key is test_row_0/B:col10/1734043052074/Put/seqid=0 2024-12-12T22:37:32,600 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/1ecb459a5a4347e4b31b894655f00069 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/1ecb459a5a4347e4b31b894655f00069 2024-12-12T22:37:32,606 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 1ecb459a5a4347e4b31b894655f00069(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:32,606 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:32,607 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043051967; duration=0sec 2024-12-12T22:37:32,607 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:32,607 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:32,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742447_1623 (size=12301) 2024-12-12T22:37:32,626 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:32,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8bb20119d4d247ea967b469b9c43826d 2024-12-12T22:37:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff is 50, key is test_row_0/C:col10/1734043052074/Put/seqid=0 2024-12-12T22:37:32,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742448_1624 (size=12301) 2024-12-12T22:37:32,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=395 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff 2024-12-12T22:37:32,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/918289933b8740aca956278446a56295 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295 2024-12-12T22:37:32,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295, entries=150, sequenceid=395, filesize=30.5 K 2024-12-12T22:37:32,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/8bb20119d4d247ea967b469b9c43826d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d 2024-12-12T22:37:32,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d, entries=150, sequenceid=395, filesize=12.0 K 2024-12-12T22:37:32,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff as 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff 2024-12-12T22:37:32,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff, entries=150, sequenceid=395, filesize=12.0 K 2024-12-12T22:37:32,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 487bd01b47fe4fd77cb2b4619f92faba in 630ms, sequenceid=395, compaction requested=false 2024-12-12T22:37:32,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:32,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:32,737 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:32,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043112747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043112750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043112750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043112750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:32,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043112767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,780 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,781 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212086d22b4f3854e6b8a8ac46585ddfe06_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:32,781 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:32,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:32,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,782 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742449_1625 (size=14994) 2024-12-12T22:37:32,841 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:32,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043112853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043112854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:32,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043112859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,868 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212086d22b4f3854e6b8a8ac46585ddfe06_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212086d22b4f3854e6b8a8ac46585ddfe06_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:32,873 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/0e6b6c2d0c994d4ca6a0310f26570080, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:32,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/0e6b6c2d0c994d4ca6a0310f26570080 is 175, key is test_row_0/A:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:32,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742450_1626 (size=39949) 2024-12-12T22:37:32,934 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:32,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:32,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
as already flushing 2024-12-12T22:37:32,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:32,934 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:32,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043113057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043113060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043113068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,176 DEBUG [Thread-2298 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5bb75907 to 127.0.0.1:50645 2024-12-12T22:37:33,176 DEBUG [Thread-2298 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:33,177 DEBUG [Thread-2300 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c1d3a95 to 127.0.0.1:50645 2024-12-12T22:37:33,177 DEBUG [Thread-2300 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:33,178 DEBUG [Thread-2306 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42aacb30 to 127.0.0.1:50645 2024-12-12T22:37:33,178 DEBUG [Thread-2306 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:33,179 DEBUG [Thread-2304 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x404bb685 to 127.0.0.1:50645 2024-12-12T22:37:33,179 DEBUG [Thread-2304 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:33,180 DEBUG [Thread-2302 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x503a7d2e to 127.0.0.1:50645 2024-12-12T22:37:33,180 DEBUG [Thread-2302 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:33,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48742 deadline: 1734043113256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:33,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48734 deadline: 1734043113275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,302 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=425, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/0e6b6c2d0c994d4ca6a0310f26570080 2024-12-12T22:37:33,308 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/168aefac4e064cd8966dfc2166088f17 is 50, key is test_row_0/B:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:33,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742451_1627 (size=12301) 2024-12-12T22:37:33,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043113361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043113364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043113372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,395 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,396 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,547 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,548 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:33,700 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:33,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:33,726 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/168aefac4e064cd8966dfc2166088f17 2024-12-12T22:37:33,732 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b9560755c836438fbca443bc93424221 is 50, key is test_row_0/C:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:33,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742452_1628 (size=12301) 2024-12-12T22:37:33,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:33,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:33,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:33,854 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:33,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-1 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:33,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48784 deadline: 1734043113864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48718 deadline: 1734043113865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:33,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-12T22:37:33,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:48780 deadline: 1734043113874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:34,005 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:34,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:34,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:34,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:34,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] handler.RSProcedureHandler(58): pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-12T22:37:34,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-2 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=152 java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:34,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4114): Remote procedure failed, pid=152 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-12T22:37:34,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=425 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b9560755c836438fbca443bc93424221 2024-12-12T22:37:34,145 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/0e6b6c2d0c994d4ca6a0310f26570080 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080 2024-12-12T22:37:34,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080, entries=200, sequenceid=425, filesize=39.0 K 2024-12-12T22:37:34,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/168aefac4e064cd8966dfc2166088f17 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17 2024-12-12T22:37:34,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17, entries=150, sequenceid=425, filesize=12.0 K 2024-12-12T22:37:34,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/b9560755c836438fbca443bc93424221 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221 2024-12-12T22:37:34,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221, entries=150, sequenceid=425, filesize=12.0 K 2024-12-12T22:37:34,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:34,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 487bd01b47fe4fd77cb2b4619f92faba in 1421ms, sequenceid=425, compaction requested=true 2024-12-12T22:37:34,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:34,158 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:34,158 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36025 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=152 2024-12-12T22:37:34,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,158 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-12T22:37:34,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:A, priority=-2147483648, current under compaction store size is 1 2024-12-12T22:37:34,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:34,158 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:34,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:34,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:B, priority=-2147483648, current under compaction store size is 2 2024-12-12T22:37:34,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:34,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:34,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 487bd01b47fe4fd77cb2b4619f92faba:C, priority=-2147483648, current under compaction store size is 3 2024-12-12T22:37:34,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:34,159 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:34,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:34,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:34,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:34,159 DEBUG 
[RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103345 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:34,159 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/A is initiating minor compaction (all files) 2024-12-12T22:37:34,159 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/A in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,160 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=100.9 K 2024-12-12T22:37:34,160 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
files: [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080] 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6c67137d63cf4a3895892097805d6b44, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/B is initiating minor compaction (all files) 2024-12-12T22:37:34,160 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/B in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,160 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/1ecb459a5a4347e4b31b894655f00069, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.9 K 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 918289933b8740aca956278446a56295, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1734043051739 2024-12-12T22:37:34,160 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ecb459a5a4347e4b31b894655f00069, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:34,161 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e6b6c2d0c994d4ca6a0310f26570080, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1734043052116 2024-12-12T22:37:34,161 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bb20119d4d247ea967b469b9c43826d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1734043051739 2024-12-12T22:37:34,161 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 
168aefac4e064cd8966dfc2166088f17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1734043052116 2024-12-12T22:37:34,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d0e346e1edf44c2aab73fe87705e1ef6_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043052748/Put/seqid=0 2024-12-12T22:37:34,180 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:34,181 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#B#compaction#526 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:34,182 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/72de4e266d164345a4a8a772431f7236 is 50, key is test_row_0/B:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:34,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742453_1629 (size=12454) 2024-12-12T22:37:34,192 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121217ac2624c1bd484785f4d9a6cdf1366b_487bd01b47fe4fd77cb2b4619f92faba store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:34,194 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121217ac2624c1bd484785f4d9a6cdf1366b_487bd01b47fe4fd77cb2b4619f92faba, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:34,194 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121217ac2624c1bd484785f4d9a6cdf1366b_487bd01b47fe4fd77cb2b4619f92faba because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:34,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742454_1630 (size=13289) 2024-12-12T22:37:34,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742455_1631 (size=4469) 2024-12-12T22:37:34,234 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 487bd01b47fe4fd77cb2b4619f92faba#A#compaction#525 average throughput is 0.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:34,235 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4b17d3c5dc7c4043a936ce94b4baff86 is 175, key is test_row_0/A:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:34,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742456_1632 (size=32243) 2024-12-12T22:37:34,257 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/72de4e266d164345a4a8a772431f7236 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/72de4e266d164345a4a8a772431f7236 2024-12-12T22:37:34,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:34,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36025 {}] regionserver.HRegion(8581): Flush requested on 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:34,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. as already flushing 2024-12-12T22:37:34,275 DEBUG [Thread-2289 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:50645 2024-12-12T22:37:34,275 DEBUG [Thread-2289 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:34,279 DEBUG [Thread-2287 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x60d631a3 to 127.0.0.1:50645 2024-12-12T22:37:34,279 DEBUG [Thread-2287 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:34,284 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/B of 487bd01b47fe4fd77cb2b4619f92faba into 72de4e266d164345a4a8a772431f7236(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-12T22:37:34,284 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:34,284 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/B, priority=13, startTime=1734043054158; duration=0sec 2024-12-12T22:37:34,284 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-12T22:37:34,284 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:B 2024-12-12T22:37:34,284 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-12T22:37:34,295 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-12T22:37:34,296 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1540): 487bd01b47fe4fd77cb2b4619f92faba/C is initiating minor compaction (all files) 2024-12-12T22:37:34,296 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 487bd01b47fe4fd77cb2b4619f92faba/C in TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,297 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/552fd15a1744444d958bbf791a61843f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221] into tmpdir=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp, totalSize=36.9 K 2024-12-12T22:37:34,297 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 552fd15a1744444d958bbf791a61843f, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=384, earliestPutTs=1734043051105 2024-12-12T22:37:34,301 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ea0318b17d543cdbdf1ae7e9dc6f5ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=395, earliestPutTs=1734043051739 2024-12-12T22:37:34,301 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] compactions.Compactor(224): Compacting b9560755c836438fbca443bc93424221, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=425, earliestPutTs=1734043052116 2024-12-12T22:37:34,341 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
487bd01b47fe4fd77cb2b4619f92faba#C#compaction#527 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-12T22:37:34,342 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/09ca67ea58074f10a581c788d23de712 is 50, key is test_row_0/C:col10/1734043052116/Put/seqid=0 2024-12-12T22:37:34,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742457_1633 (size=13289) 2024-12-12T22:37:34,361 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/09ca67ea58074f10a581c788d23de712 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/09ca67ea58074f10a581c788d23de712 2024-12-12T22:37:34,372 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/C of 487bd01b47fe4fd77cb2b4619f92faba into 09ca67ea58074f10a581c788d23de712(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:34,372 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:34,372 INFO [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/C, priority=13, startTime=1734043054159; duration=0sec 2024-12-12T22:37:34,372 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:34,372 DEBUG [RS:0;1aef280cf0a8:36025-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:C 2024-12-12T22:37:34,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:34,587 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241212d0e346e1edf44c2aab73fe87705e1ef6_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0e346e1edf44c2aab73fe87705e1ef6_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:34,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=152}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:34,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 is 175, key is test_row_0/A:col10/1734043052748/Put/seqid=0 2024-12-12T22:37:34,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742458_1634 (size=31255) 2024-12-12T22:37:34,594 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=435, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 2024-12-12T22:37:34,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/cfec156bfe194ccbaa79067df0a93a7a is 50, key is test_row_0/B:col10/1734043052748/Put/seqid=0 2024-12-12T22:37:34,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742459_1635 (size=12301) 2024-12-12T22:37:34,612 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/cfec156bfe194ccbaa79067df0a93a7a 2024-12-12T22:37:34,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/44977acf7fb749709c962cfc3e101b16 is 50, key is test_row_0/C:col10/1734043052748/Put/seqid=0 2024-12-12T22:37:34,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742460_1636 (size=12301) 2024-12-12T22:37:34,672 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/44977acf7fb749709c962cfc3e101b16 2024-12-12T22:37:34,672 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/4b17d3c5dc7c4043a936ce94b4baff86 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4b17d3c5dc7c4043a936ce94b4baff86 2024-12-12T22:37:34,676 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 487bd01b47fe4fd77cb2b4619f92faba/A of 487bd01b47fe4fd77cb2b4619f92faba into 4b17d3c5dc7c4043a936ce94b4baff86(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-12T22:37:34,676 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:34,676 INFO [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba., storeName=487bd01b47fe4fd77cb2b4619f92faba/A, priority=13, startTime=1734043054158; duration=0sec 2024-12-12T22:37:34,677 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-12T22:37:34,677 DEBUG [RS:0;1aef280cf0a8:36025-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 487bd01b47fe4fd77cb2b4619f92faba:A 2024-12-12T22:37:34,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 2024-12-12T22:37:34,681 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0, entries=150, sequenceid=435, filesize=30.5 K 2024-12-12T22:37:34,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/cfec156bfe194ccbaa79067df0a93a7a as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/cfec156bfe194ccbaa79067df0a93a7a 2024-12-12T22:37:34,686 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/cfec156bfe194ccbaa79067df0a93a7a, entries=150, sequenceid=435, filesize=12.0 K 2024-12-12T22:37:34,690 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/44977acf7fb749709c962cfc3e101b16 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/44977acf7fb749709c962cfc3e101b16 2024-12-12T22:37:34,694 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/44977acf7fb749709c962cfc3e101b16, entries=150, sequenceid=435, filesize=12.0 K 2024-12-12T22:37:34,695 INFO [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=13.42 KB/13740 for 487bd01b47fe4fd77cb2b4619f92faba in 537ms, sequenceid=435, compaction requested=false 2024-12-12T22:37:34,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.HRegion(2538): Flush status journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:34,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:34,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/1aef280cf0a8:0-0 {event_type=RS_FLUSH_REGIONS, pid=152}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=152 2024-12-12T22:37:34,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster(4106): Remote procedure done, pid=152 2024-12-12T22:37:34,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-12-12T22:37:34,698 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5540 sec 2024-12-12T22:37:34,700 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=151, table=TestAcidGuarantees in 2.5590 sec 2024-12-12T22:37:34,868 DEBUG [Thread-2293 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:50645 2024-12-12T22:37:34,868 DEBUG [Thread-2293 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:34,873 DEBUG [Thread-2295 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:50645 2024-12-12T22:37:34,873 DEBUG [Thread-2295 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:34,879 DEBUG [Thread-2291 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:50645 2024-12-12T22:37:34,879 DEBUG [Thread-2291 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:36,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-12-12T22:37:36,269 INFO [Thread-2297 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4194 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4175 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3999 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4141 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4095 2024-12-12T22:37:36,269 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-12T22:37:36,269 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T22:37:36,269 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e047c09 to 127.0.0.1:50645 2024-12-12T22:37:36,269 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:36,270 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-12T22:37:36,270 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-12T22:37:36,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:36,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:36,273 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043056273"}]},"ts":"1734043056273"} 2024-12-12T22:37:36,274 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-12T22:37:36,287 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-12T22:37:36,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-12T22:37:36,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, UNASSIGN}] 2024-12-12T22:37:36,290 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took 
xlock for pid=155, ppid=154, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, UNASSIGN 2024-12-12T22:37:36,290 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=155 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=CLOSING, regionLocation=1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:36,291 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-12T22:37:36,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; CloseRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576}] 2024-12-12T22:37:36,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:36,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:36,442 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] handler.UnassignRegionHandler(124): Close 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1681): Closing 487bd01b47fe4fd77cb2b4619f92faba, disabling compactions & flushes 2024-12-12T22:37:36,443 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. after waiting 0 ms 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 
2024-12-12T22:37:36,443 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(2837): Flushing 487bd01b47fe4fd77cb2b4619f92faba 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=A 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=B 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 487bd01b47fe4fd77cb2b4619f92faba, store=C 2024-12-12T22:37:36,443 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-12T22:37:36,447 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125fa85f567a7d40209bc7487e0a56cb4e_487bd01b47fe4fd77cb2b4619f92faba is 50, key is test_row_0/A:col10/1734043054878/Put/seqid=0 2024-12-12T22:37:36,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742461_1637 (size=12454) 2024-12-12T22:37:36,450 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-12T22:37:36,453 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412125fa85f567a7d40209bc7487e0a56cb4e_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125fa85f567a7d40209bc7487e0a56cb4e_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:36,454 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/dceadc5baac649abbf7eb08640b32245, store: [table=TestAcidGuarantees family=A region=487bd01b47fe4fd77cb2b4619f92faba] 2024-12-12T22:37:36,454 DEBUG 
[RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/dceadc5baac649abbf7eb08640b32245 is 175, key is test_row_0/A:col10/1734043054878/Put/seqid=0 2024-12-12T22:37:36,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742462_1638 (size=31255) 2024-12-12T22:37:36,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:36,858 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=446, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/dceadc5baac649abbf7eb08640b32245 2024-12-12T22:37:36,863 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/79e489b1e9014f7d96ac20a17e7487c6 is 50, key is test_row_0/B:col10/1734043054878/Put/seqid=0 2024-12-12T22:37:36,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742463_1639 (size=12301) 2024-12-12T22:37:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:37,269 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/79e489b1e9014f7d96ac20a17e7487c6 2024-12-12T22:37:37,273 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/dee1686c63bc4f6fb060861f2fae28a7 is 50, key is test_row_0/C:col10/1734043054878/Put/seqid=0 2024-12-12T22:37:37,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742464_1640 (size=12301) 2024-12-12T22:37:37,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:37,681 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=446 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/dee1686c63bc4f6fb060861f2fae28a7 
2024-12-12T22:37:37,685 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/A/dceadc5baac649abbf7eb08640b32245 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/dceadc5baac649abbf7eb08640b32245 2024-12-12T22:37:37,688 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/dceadc5baac649abbf7eb08640b32245, entries=150, sequenceid=446, filesize=30.5 K 2024-12-12T22:37:37,689 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/B/79e489b1e9014f7d96ac20a17e7487c6 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/79e489b1e9014f7d96ac20a17e7487c6 2024-12-12T22:37:37,692 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/79e489b1e9014f7d96ac20a17e7487c6, entries=150, sequenceid=446, filesize=12.0 K 2024-12-12T22:37:37,692 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/.tmp/C/dee1686c63bc4f6fb060861f2fae28a7 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/dee1686c63bc4f6fb060861f2fae28a7 2024-12-12T22:37:37,695 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/dee1686c63bc4f6fb060861f2fae28a7, entries=150, sequenceid=446, filesize=12.0 K 2024-12-12T22:37:37,696 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 487bd01b47fe4fd77cb2b4619f92faba in 1252ms, sequenceid=446, compaction requested=true 2024-12-12T22:37:37,696 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080] to archive 2024-12-12T22:37:37,696 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:37:37,698 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6289983006be43d2af3e4b7f9b4a9259 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31dea063c3c9465c8c1132b4a1feff84 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/67953b459449417b86c44c4feb73b2f6 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/bf103fea04c44ab1a13529101d38f2e5 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/31999745c06d47929a8021dee1c4875a 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e2ee0016bc9041d3aff5137523138ea5 2024-12-12T22:37:37,698 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a03f57831b554d259ad8a19772536c09 2024-12-12T22:37:37,699 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/267007e07c68428380d597f91506b6a3 2024-12-12T22:37:37,699 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/14ff077e33d947afa5824c90296ed0c0 2024-12-12T22:37:37,699 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d2cf47705b4f45f19cb382bb33aaee9f 2024-12-12T22:37:37,699 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/b296948fa4694a268d54be9b5a0e84db 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/f583be3b142041d28c829cc41fb3d304 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/71109f3ec11d41319db82e53cbaa82cf 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/cd2785bda46b4afd9a0cbbfa13490ae2 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/26bcc0733632474ca5a92b6f3f63b278 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/e61063bfe0844183b6b50b82c7099fca 2024-12-12T22:37:37,700 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/61703f2a91bc4438a64652d7c94f01f3 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/162cabfee7be49abb6325c54c8539ec5 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/d1f7aaee84a94902a6676a90831102c2 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/446100dfd85b4c68966496f92fab60de 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/eee9a3078577493eab3a62ee4132f712 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/c220ff328c414bf9866460a1971ead60 2024-12-12T22:37:37,701 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/a9dbcb5bbf4d4504bb97d24b6907dd17 2024-12-12T22:37:37,703 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/2a7a4231c7fe4d508af9ecb8f9eab552 2024-12-12T22:37:37,704 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4db45c76a0194083952e154f4e112b91 2024-12-12T22:37:37,704 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/6c67137d63cf4a3895892097805d6b44 2024-12-12T22:37:37,704 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/9e12389697cc4f14ad6eba95633ac15a 2024-12-12T22:37:37,704 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/0e6b6c2d0c994d4ca6a0310f26570080 2024-12-12T22:37:37,704 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/918289933b8740aca956278446a56295 2024-12-12T22:37:37,705 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/62da6a9f8efa4093939255da88425a55, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8eeef63b1dc64f35bf8a6d04c3e5e667, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/69f6455fe8064e54be7d6f3e48814cb4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/c43d46d2a85e4bf4b7fff230c68e9348, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b129c36064c94a99ba6e379feebdd875, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/41b3e841fa664664bf28924aad9eba50, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/71a8776895b545a19a7d1421108c3353, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/1ecb459a5a4347e4b31b894655f00069, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17] to archive 2024-12-12T22:37:37,705 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:37:37,707 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0ef4a98a167e4d1ab61c7bb592e94bf8 2024-12-12T22:37:37,707 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/62da6a9f8efa4093939255da88425a55 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/62da6a9f8efa4093939255da88425a55 2024-12-12T22:37:37,707 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/4a6ff29399de48c78ffe5620002c2049 2024-12-12T22:37:37,707 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8eeef63b1dc64f35bf8a6d04c3e5e667 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8eeef63b1dc64f35bf8a6d04c3e5e667 2024-12-12T22:37:37,707 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/bfd461df81054eb292088ea0bb6c40e2 2024-12-12T22:37:37,708 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/07eff79b4fe54fd28a0fe8d85f4bfe59 2024-12-12T22:37:37,710 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/5b8e8895d75b406da0cc6f4d09fce601 2024-12-12T22:37:37,710 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/fb0f842b824a492ab08933acb5cd66a9 2024-12-12T22:37:37,710 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/068dac28fed7497bb9404303cdf02265 2024-12-12T22:37:37,710 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/69f6455fe8064e54be7d6f3e48814cb4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/69f6455fe8064e54be7d6f3e48814cb4 2024-12-12T22:37:37,711 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/99c9949f5edd473297792560ed17d6b8 2024-12-12T22:37:37,711 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3ac647a4de0049e6ab768c1fc90194af 2024-12-12T22:37:37,711 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/713df1159a0a4f5b92f3e5cc48c9b137 2024-12-12T22:37:37,712 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/0abd28d87c2546c6ad5f6aa8a5566ebc 2024-12-12T22:37:37,712 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/643e14dbfe2a4c13a7a5064743d95dc3 2024-12-12T22:37:37,712 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b129c36064c94a99ba6e379feebdd875 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b129c36064c94a99ba6e379feebdd875 2024-12-12T22:37:37,712 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/e2683a2640d840acaa07b24d5a7933ef 2024-12-12T22:37:37,713 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/b95b2c6196fe48209fb8c495e50b4f64 2024-12-12T22:37:37,713 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/41b3e841fa664664bf28924aad9eba50 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/41b3e841fa664664bf28924aad9eba50 2024-12-12T22:37:37,713 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/3e095b716f9248e9af8b120e13ba10a9 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/71a8776895b545a19a7d1421108c3353 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/71a8776895b545a19a7d1421108c3353 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/6636ecce53fa4e6fa06382ee214563b2 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8582cd52bdb34863af3b915ce6e7cc87 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/741859aca9ce4650881b868579aa0a93 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/1ecb459a5a4347e4b31b894655f00069 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/1ecb459a5a4347e4b31b894655f00069 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/65bd4e88e96246309b41d438ddeadbc3 2024-12-12T22:37:37,714 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/8bb20119d4d247ea967b469b9c43826d 2024-12-12T22:37:37,715 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/168aefac4e064cd8966dfc2166088f17 2024-12-12T22:37:37,715 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/c43d46d2a85e4bf4b7fff230c68e9348 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/c43d46d2a85e4bf4b7fff230c68e9348 2024-12-12T22:37:37,716 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/176c6be7ea6147538c47e5d424a68310, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/6568955b83c041d3b42ba0d86b144781, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/106f38c0972149e6bf06f9b73f4f65ad, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c9c75977dd8144d7b2e99e447e48d248, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5f6cd44feba54f57aef6e7f6c7a5e0b7, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/98ff1132eaee475cb0538e5595dbc736, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/49e1f3777ee24a999060461f358286f9, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/552fd15a1744444d958bbf791a61843f, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221] to archive 2024-12-12T22:37:37,717 DEBUG [StoreCloser-TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-12T22:37:37,718 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/176c6be7ea6147538c47e5d424a68310 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/176c6be7ea6147538c47e5d424a68310 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e88fdfe01bd34132bf8f2b67945e0cd8 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/6568955b83c041d3b42ba0d86b144781 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/6568955b83c041d3b42ba0d86b144781 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/1e818518c5064689b8bbcc3fbff62068 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9cbdca274dcd49299add277957b3c2f4 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/9acd8b7adb6647339617b85bea4a30cb 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/25ac1095d5de4f3daf8b36612223784d 2024-12-12T22:37:37,719 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b43e0d4c69f045a1956dc393b63de056 2024-12-12T22:37:37,720 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/2c5232442a2f4ae194ea600fa7b0a8ba 2024-12-12T22:37:37,720 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/106f38c0972149e6bf06f9b73f4f65ad to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/106f38c0972149e6bf06f9b73f4f65ad 2024-12-12T22:37:37,720 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/a5d9846006794b40953a46a370e7dd57 2024-12-12T22:37:37,720 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/bfd14a35ed9d478ba5d0b1ffd9c19222 2024-12-12T22:37:37,721 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/79aa49b7c2db4fd1bfc84d1b8da97ad5 2024-12-12T22:37:37,721 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c9c75977dd8144d7b2e99e447e48d248 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c9c75977dd8144d7b2e99e447e48d248 2024-12-12T22:37:37,721 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0e1eb59567894093a58365763934de55 2024-12-12T22:37:37,721 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/0718b858e54340499c3d425c3a120495 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/ed26d6bc36fe4472b55669f16e7c1a23 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5f6cd44feba54f57aef6e7f6c7a5e0b7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5f6cd44feba54f57aef6e7f6c7a5e0b7 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/98ff1132eaee475cb0538e5595dbc736 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/98ff1132eaee475cb0538e5595dbc736 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/49e1f3777ee24a999060461f358286f9 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/49e1f3777ee24a999060461f358286f9 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/887c384651a94a9780f838bc86535f69 2024-12-12T22:37:37,722 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/c48d5a96f6d04c37afb55936544149e3 2024-12-12T22:37:37,723 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/796356826e624244a40a5290a4021892 2024-12-12T22:37:37,723 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/e94070d70266468a97b033622b1239be 2024-12-12T22:37:37,723 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b987d7bbfbce4620a86ee1d956442e3a 2024-12-12T22:37:37,724 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/d7eeca17c651425eaac8f1abf843de15 2024-12-12T22:37:37,724 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/5ea0318b17d543cdbdf1ae7e9dc6f5ff 2024-12-12T22:37:37,724 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/552fd15a1744444d958bbf791a61843f to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/552fd15a1744444d958bbf791a61843f 2024-12-12T22:37:37,724 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/b9560755c836438fbca443bc93424221 2024-12-12T22:37:37,727 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits/449.seqid, newMaxSeqId=449, maxSeqId=4 2024-12-12T22:37:37,727 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba. 2024-12-12T22:37:37,727 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] regionserver.HRegion(1635): Region close journal for 487bd01b47fe4fd77cb2b4619f92faba: 2024-12-12T22:37:37,728 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION, pid=156}] handler.UnassignRegionHandler(170): Closed 487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:37,729 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=155 updating hbase:meta row=487bd01b47fe4fd77cb2b4619f92faba, regionState=CLOSED 2024-12-12T22:37:37,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-12T22:37:37,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseRegionProcedure 487bd01b47fe4fd77cb2b4619f92faba, server=1aef280cf0a8,36025,1734042873576 in 1.4390 sec 2024-12-12T22:37:37,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-12T22:37:37,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=487bd01b47fe4fd77cb2b4619f92faba, UNASSIGN in 1.4420 sec 2024-12-12T22:37:37,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-12T22:37:37,732 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4440 sec 2024-12-12T22:37:37,733 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734043057733"}]},"ts":"1734043057733"} 2024-12-12T22:37:37,734 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-12T22:37:37,773 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-12T22:37:37,775 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5040 sec 2024-12-12T22:37:38,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-12T22:37:38,377 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-12T22:37:38,378 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-12T22:37:38,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,379 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=157, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,380 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=157, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T22:37:38,381 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,383 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C, FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits] 2024-12-12T22:37:38,386 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/7c2db4d21fa042e2bfadb8d3e0fdd7e0 2024-12-12T22:37:38,386 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4b17d3c5dc7c4043a936ce94b4baff86 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/4b17d3c5dc7c4043a936ce94b4baff86 2024-12-12T22:37:38,386 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/dceadc5baac649abbf7eb08640b32245 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/A/dceadc5baac649abbf7eb08640b32245 
2024-12-12T22:37:38,389 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/79e489b1e9014f7d96ac20a17e7487c6 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/79e489b1e9014f7d96ac20a17e7487c6 2024-12-12T22:37:38,389 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/72de4e266d164345a4a8a772431f7236 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/72de4e266d164345a4a8a772431f7236 2024-12-12T22:37:38,389 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/cfec156bfe194ccbaa79067df0a93a7a to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/B/cfec156bfe194ccbaa79067df0a93a7a 2024-12-12T22:37:38,393 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/44977acf7fb749709c962cfc3e101b16 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/44977acf7fb749709c962cfc3e101b16 2024-12-12T22:37:38,393 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/dee1686c63bc4f6fb060861f2fae28a7 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/dee1686c63bc4f6fb060861f2fae28a7 2024-12-12T22:37:38,393 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/09ca67ea58074f10a581c788d23de712 to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/C/09ca67ea58074f10a581c788d23de712 2024-12-12T22:37:38,396 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits/449.seqid to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba/recovered.edits/449.seqid 2024-12-12T22:37:38,397 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/default/TestAcidGuarantees/487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,397 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-12T22:37:38,397 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:37:38,398 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-12T22:37:38,407 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f45717fa5904ab5ab08822caa9edc2e_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412123f45717fa5904ab5ab08822caa9edc2e_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,407 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212170dc4d9b9ce4fcbbfe8062ffc147406_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212170dc4d9b9ce4fcbbfe8062ffc147406_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,407 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121244a2ee77ca42470199d9731c275f7fef_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121244a2ee77ca42470199d9731c275f7fef_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,407 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212086d22b4f3854e6b8a8ac46585ddfe06_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212086d22b4f3854e6b8a8ac46585ddfe06_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,407 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212402c2e4dcc814f47bd6dfafb112cc22f_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212402c2e4dcc814f47bd6dfafb112cc22f_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,408 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212438e2f7686274060954e52d6dca635d6_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212438e2f7686274060954e52d6dca635d6_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,409 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127731449584fd4a3baeee0fa4e6cadf71_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412127731449584fd4a3baeee0fa4e6cadf71_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,409 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121278c4a7c7fce8497b878ad94e770f8f46_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121278c4a7c7fce8497b878ad94e770f8f46_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,409 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125fa85f567a7d40209bc7487e0a56cb4e_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125fa85f567a7d40209bc7487e0a56cb4e_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,409 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212162936491f8a486b8ad6cf2d1787dcf3_487bd01b47fe4fd77cb2b4619f92faba to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212162936491f8a486b8ad6cf2d1787dcf3_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,409 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125bc83388cb9b4dfea9bd03efefc1dfb4_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412125bc83388cb9b4dfea9bd03efefc1dfb4_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,410 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a3a1f5f78b148df9ad96d8c15bc3f1f_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129a3a1f5f78b148df9ad96d8c15bc3f1f_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,410 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129493f28f3d804a968da2a6506f346000_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129493f28f3d804a968da2a6506f346000_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,411 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128274b58dc2074abc9730d795d0922413_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412128274b58dc2074abc9730d795d0922413_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,411 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b59f877f930542eb888bd2db20aa7c26_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212b59f877f930542eb888bd2db20aa7c26_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,411 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c86f478fea3c4b2782555da3997b555a_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212c86f478fea3c4b2782555da3997b555a_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,411 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0e346e1edf44c2aab73fe87705e1ef6_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d0e346e1edf44c2aab73fe87705e1ef6_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,412 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129c65e9beb6ea45bc912a2bb375cc5cd9_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412129c65e9beb6ea45bc912a2bb375cc5cd9_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,412 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f7b026bf2ce14c61a7d661c5bd96b821_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212f7b026bf2ce14c61a7d661c5bd96b821_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,412 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279aa951dc9124f42981f0f4ebef20b59_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121279aa951dc9124f42981f0f4ebef20b59_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,412 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d36f3f19a4454d40a28611796ca1ded0_487bd01b47fe4fd77cb2b4619f92faba to 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241212d36f3f19a4454d40a28611796ca1ded0_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,412 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258cd658eb6304cfb90fa7ef88039af87_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121258cd658eb6304cfb90fa7ef88039af87_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,413 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124ea0d1899bd143ae8de6cdde92ed0b29_487bd01b47fe4fd77cb2b4619f92faba to hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412124ea0d1899bd143ae8de6cdde92ed0b29_487bd01b47fe4fd77cb2b4619f92faba 2024-12-12T22:37:38,416 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-12T22:37:38,430 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=157, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,433 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-12T22:37:38,447 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-12T22:37:38,455 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=157, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,455 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-12T22:37:38,455 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734043058455"}]},"ts":"9223372036854775807"} 2024-12-12T22:37:38,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T22:37:38,482 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-12T22:37:38,483 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 487bd01b47fe4fd77cb2b4619f92faba, NAME => 'TestAcidGuarantees,,1734043029833.487bd01b47fe4fd77cb2b4619f92faba.', STARTKEY => '', ENDKEY => ''}] 2024-12-12T22:37:38,483 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-12T22:37:38,483 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734043058483"}]},"ts":"9223372036854775807"} 2024-12-12T22:37:38,511 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-12T22:37:38,575 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=157, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-12T22:37:38,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 198 msec 2024-12-12T22:37:38,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35059 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-12T22:37:38,682 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-12T22:37:38,694 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=243 (was 243), OpenFileDescriptor=452 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1420 (was 1425), ProcessCount=9 (was 9), AvailableMemoryMB=4785 (was 3870) - AvailableMemoryMB LEAK? 
- 2024-12-12T22:37:38,694 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-12T22:37:38,694 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-12T22:37:38,694 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:50645 2024-12-12T22:37:38,694 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:38,694 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-12T22:37:38,695 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1923464191, stopped=false 2024-12-12T22:37:38,695 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=1aef280cf0a8,35059,1734042872477 2024-12-12T22:37:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T22:37:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-12T22:37:38,707 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-12T22:37:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:37:38,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:37:38,707 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:38,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T22:37:38,707 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '1aef280cf0a8,36025,1734042873576' ***** 2024-12-12T22:37:38,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-12T22:37:38,707 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-12T22:37:38,708 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-12T22:37:38,708 INFO [RS:0;1aef280cf0a8:36025 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-12T22:37:38,708 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-12T22:37:38,708 INFO [RS:0;1aef280cf0a8:36025 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-12T22:37:38,708 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(3579): Received CLOSE for 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1224): stopping server 1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:38,709 DEBUG [RS:0;1aef280cf0a8:36025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-12T22:37:38,709 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 20f68c42b55d0d7b4a49ed486e40f5a4, disabling compactions & flushes 2024-12-12T22:37:38,709 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:37:38,709 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:37:38,709 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-12T22:37:38,709 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. after waiting 0 ms 2024-12-12T22:37:38,709 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 
2024-12-12T22:37:38,709 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 20f68c42b55d0d7b4a49ed486e40f5a4=hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4.} 2024-12-12T22:37:38,709 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 20f68c42b55d0d7b4a49ed486e40f5a4 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-12T22:37:38,710 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 20f68c42b55d0d7b4a49ed486e40f5a4 2024-12-12T22:37:38,710 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-12T22:37:38,710 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-12T22:37:38,710 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-12T22:37:38,710 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-12T22:37:38,710 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-12T22:37:38,710 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-12T22:37:38,734 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/.tmp/info/46476007495a40a9918d30f0557a7c19 is 45, key is default/info:d/1734042878584/Put/seqid=0 2024-12-12T22:37:38,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742465_1641 (size=5037) 2024-12-12T22:37:38,741 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/.tmp/info/46476007495a40a9918d30f0557a7c19 2024-12-12T22:37:38,749 INFO [regionserver/1aef280cf0a8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T22:37:38,762 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/.tmp/info/46476007495a40a9918d30f0557a7c19 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/info/46476007495a40a9918d30f0557a7c19 2024-12-12T22:37:38,765 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/info/5d643a8896624af9bb8343126f43d7a8 is 143, key is hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4./info:regioninfo/1734042878429/Put/seqid=0 2024-12-12T22:37:38,788 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/info/46476007495a40a9918d30f0557a7c19, entries=2, sequenceid=6, filesize=4.9 K 2024-12-12T22:37:38,788 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 20f68c42b55d0d7b4a49ed486e40f5a4 in 79ms, sequenceid=6, compaction requested=false 2024-12-12T22:37:38,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742466_1642 (size=7725) 2024-12-12T22:37:38,803 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/info/5d643a8896624af9bb8343126f43d7a8 2024-12-12T22:37:38,824 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/namespace/20f68c42b55d0d7b4a49ed486e40f5a4/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-12T22:37:38,827 INFO [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 2024-12-12T22:37:38,827 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 20f68c42b55d0d7b4a49ed486e40f5a4: 2024-12-12T22:37:38,827 DEBUG [RS_CLOSE_REGION-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734042877357.20f68c42b55d0d7b4a49ed486e40f5a4. 
2024-12-12T22:37:38,861 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/rep_barrier/759a80a08c1a430ebc867fbf91277ca2 is 102, key is TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5./rep_barrier:/1734042908135/DeleteFamily/seqid=0 2024-12-12T22:37:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742467_1643 (size=6025) 2024-12-12T22:37:38,893 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/rep_barrier/759a80a08c1a430ebc867fbf91277ca2 2024-12-12T22:37:38,911 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T22:37:38,926 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/table/e800fe3acd2e460cb4c7a36dd70e9713 is 96, key is TestAcidGuarantees,,1734042878916.3292c08f1e2fe18d3fcbb52f186614f5./table:/1734042908135/DeleteFamily/seqid=0 2024-12-12T22:37:38,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742468_1644 (size=5942) 2024-12-12T22:37:39,111 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T22:37:39,135 INFO [regionserver/1aef280cf0a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-12T22:37:39,135 INFO [regionserver/1aef280cf0a8:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-12T22:37:39,312 DEBUG [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-12T22:37:39,357 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/table/e800fe3acd2e460cb4c7a36dd70e9713 2024-12-12T22:37:39,361 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/info/5d643a8896624af9bb8343126f43d7a8 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/info/5d643a8896624af9bb8343126f43d7a8 2024-12-12T22:37:39,365 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/info/5d643a8896624af9bb8343126f43d7a8, entries=22, sequenceid=93, filesize=7.5 K 2024-12-12T22:37:39,366 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/rep_barrier/759a80a08c1a430ebc867fbf91277ca2 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/rep_barrier/759a80a08c1a430ebc867fbf91277ca2 2024-12-12T22:37:39,369 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/rep_barrier/759a80a08c1a430ebc867fbf91277ca2, entries=6, sequenceid=93, filesize=5.9 K 2024-12-12T22:37:39,370 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/.tmp/table/e800fe3acd2e460cb4c7a36dd70e9713 as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/table/e800fe3acd2e460cb4c7a36dd70e9713 2024-12-12T22:37:39,374 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/table/e800fe3acd2e460cb4c7a36dd70e9713, entries=9, sequenceid=93, filesize=5.8 K 2024-12-12T22:37:39,375 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 665ms, sequenceid=93, compaction requested=false 2024-12-12T22:37:39,387 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-12T22:37:39,388 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-12T22:37:39,388 INFO [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-12T22:37:39,388 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-12T22:37:39,388 DEBUG [RS_CLOSE_META-regionserver/1aef280cf0a8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-12T22:37:39,512 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1250): stopping server 1aef280cf0a8,36025,1734042873576; all regions closed. 
2024-12-12T22:37:39,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741834_1010 (size=26050) 2024-12-12T22:37:39,522 DEBUG [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/oldWALs 2024-12-12T22:37:39,522 INFO [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 1aef280cf0a8%2C36025%2C1734042873576.meta:.meta(num 1734042877010) 2024-12-12T22:37:39,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741833_1009 (size=14746509) 2024-12-12T22:37:39,529 DEBUG [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/oldWALs 2024-12-12T22:37:39,529 INFO [RS:0;1aef280cf0a8:36025 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 1aef280cf0a8%2C36025%2C1734042873576:(num 1734042876289) 2024-12-12T22:37:39,529 DEBUG [RS:0;1aef280cf0a8:36025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:39,529 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.LeaseManager(133): Closed leases 2024-12-12T22:37:39,529 INFO [RS:0;1aef280cf0a8:36025 {}] hbase.ChoreService(370): Chore service for: regionserver/1aef280cf0a8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-12T22:37:39,530 INFO [RS:0;1aef280cf0a8:36025 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36025 2024-12-12T22:37:39,530 INFO [regionserver/1aef280cf0a8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-12T22:37:39,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-12T22:37:39,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/1aef280cf0a8,36025,1734042873576 2024-12-12T22:37:39,549 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [1aef280cf0a8,36025,1734042873576] 2024-12-12T22:37:39,549 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 1aef280cf0a8,36025,1734042873576; numProcessing=1 2024-12-12T22:37:39,581 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/1aef280cf0a8,36025,1734042873576 already deleted, retry=false 2024-12-12T22:37:39,581 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 1aef280cf0a8,36025,1734042873576 expired; onlineServers=0 2024-12-12T22:37:39,582 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '1aef280cf0a8,35059,1734042872477' ***** 2024-12-12T22:37:39,582 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-12T22:37:39,582 DEBUG [M:0;1aef280cf0a8:35059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6746f738, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=1aef280cf0a8/172.17.0.2:0 2024-12-12T22:37:39,582 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionServer(1224): stopping server 1aef280cf0a8,35059,1734042872477 2024-12-12T22:37:39,582 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionServer(1250): stopping server 1aef280cf0a8,35059,1734042872477; all regions closed. 2024-12-12T22:37:39,582 DEBUG [M:0;1aef280cf0a8:35059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-12T22:37:39,582 DEBUG [M:0;1aef280cf0a8:35059 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-12T22:37:39,582 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-12T22:37:39,582 DEBUG [M:0;1aef280cf0a8:35059 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-12T22:37:39,582 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.small.0-1734042875984 {}] cleaner.HFileCleaner(306): Exit Thread[master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.small.0-1734042875984,5,FailOnTimeoutGroup] 2024-12-12T22:37:39,582 DEBUG [master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.large.0-1734042875976 {}] cleaner.HFileCleaner(306): Exit Thread[master/1aef280cf0a8:0:becomeActiveMaster-HFileCleaner.large.0-1734042875976,5,FailOnTimeoutGroup] 2024-12-12T22:37:39,582 INFO [M:0;1aef280cf0a8:35059 {}] hbase.ChoreService(370): Chore service for: master/1aef280cf0a8:0 had [] on shutdown 2024-12-12T22:37:39,583 DEBUG [M:0;1aef280cf0a8:35059 {}] master.HMaster(1733): Stopping service threads 2024-12-12T22:37:39,583 INFO [M:0;1aef280cf0a8:35059 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-12T22:37:39,584 INFO [M:0;1aef280cf0a8:35059 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-12T22:37:39,584 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-12T22:37:39,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-12T22:37:39,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-12T22:37:39,598 DEBUG [M:0;1aef280cf0a8:35059 {}] zookeeper.ZKUtil(347): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-12T22:37:39,599 WARN [M:0;1aef280cf0a8:35059 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-12T22:37:39,599 INFO [M:0;1aef280cf0a8:35059 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-12T22:37:39,599 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-12T22:37:39,599 INFO [M:0;1aef280cf0a8:35059 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-12T22:37:39,599 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-12T22:37:39,599 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T22:37:39,599 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-12T22:37:39,599 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms
2024-12-12T22:37:39,599 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:37:39,599 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=773.83 KB heapSize=953.09 KB
2024-12-12T22:37:39,620 DEBUG [M:0;1aef280cf0a8:35059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0289b73ad73d4eceb8cc8c50deb86a4d is 82, key is hbase:meta,,1/info:regioninfo/1734042877204/Put/seqid=0
2024-12-12T22:37:39,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742469_1645 (size=5672)
2024-12-12T22:37:39,651 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2229 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0289b73ad73d4eceb8cc8c50deb86a4d
2024-12-12T22:37:39,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T22:37:39,665 INFO [RS:0;1aef280cf0a8:36025 {}] regionserver.HRegionServer(1307): Exiting; stopping=1aef280cf0a8,36025,1734042873576; zookeeper connection closed.
2024-12-12T22:37:39,665 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36025-0x1001c6182dc0001, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T22:37:39,665 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b89c82a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b89c82a
2024-12-12T22:37:39,666 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-12-12T22:37:39,698 DEBUG [M:0;1aef280cf0a8:35059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/35a46c18e3af4473989c7c0a70f95d6f is 2283, key is \x00\x00\x00\x00\x00\x00\x00\x82/proc:d/1734043032972/Put/seqid=0
2024-12-12T22:37:39,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742470_1646 (size=41215)
2024-12-12T22:37:39,719 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=773.28 KB at sequenceid=2229 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/35a46c18e3af4473989c7c0a70f95d6f
2024-12-12T22:37:39,728 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 35a46c18e3af4473989c7c0a70f95d6f
2024-12-12T22:37:39,755 DEBUG [M:0;1aef280cf0a8:35059 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7461f1fd5054e568e55e5468aa5fdfa is 69, key is 1aef280cf0a8,36025,1734042873576/rs:state/1734042876063/Put/seqid=0
2024-12-12T22:37:39,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073742471_1647 (size=5156)
2024-12-12T22:37:40,184 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2229 (bloomFilter=true), to=hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7461f1fd5054e568e55e5468aa5fdfa
2024-12-12T22:37:40,188 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0289b73ad73d4eceb8cc8c50deb86a4d as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0289b73ad73d4eceb8cc8c50deb86a4d
2024-12-12T22:37:40,191 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0289b73ad73d4eceb8cc8c50deb86a4d, entries=8, sequenceid=2229, filesize=5.5 K
2024-12-12T22:37:40,192 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/35a46c18e3af4473989c7c0a70f95d6f as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/35a46c18e3af4473989c7c0a70f95d6f
2024-12-12T22:37:40,195 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 35a46c18e3af4473989c7c0a70f95d6f
2024-12-12T22:37:40,195 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/35a46c18e3af4473989c7c0a70f95d6f, entries=157, sequenceid=2229, filesize=40.2 K
2024-12-12T22:37:40,196 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a7461f1fd5054e568e55e5468aa5fdfa as hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a7461f1fd5054e568e55e5468aa5fdfa
2024-12-12T22:37:40,199 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:41151/user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a7461f1fd5054e568e55e5468aa5fdfa, entries=1, sequenceid=2229, filesize=5.0 K
2024-12-12T22:37:40,200 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(3040): Finished flush of dataSize ~773.83 KB/792407, heapSize ~952.80 KB/975664, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 601ms, sequenceid=2229, compaction requested=false
2024-12-12T22:37:40,212 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-12T22:37:40,212 DEBUG [M:0;1aef280cf0a8:35059 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-12T22:37:40,218 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/60a93f54-ddce-a5d1-662a-5d47611110dc/MasterData/WALs/1aef280cf0a8,35059,1734042872477/1aef280cf0a8%2C35059%2C1734042872477.1734042874607 not finished, retry = 0
2024-12-12T22:37:40,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43795 is added to blk_1073741830_1006 (size=938443)
2024-12-12T22:37:40,322 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-12T22:37:40,322 INFO [M:0;1aef280cf0a8:35059 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-12T22:37:40,322 INFO [M:0;1aef280cf0a8:35059 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35059
2024-12-12T22:37:40,537 DEBUG [M:0;1aef280cf0a8:35059 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/1aef280cf0a8,35059,1734042872477 already deleted, retry=false
2024-12-12T22:37:40,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T22:37:40,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35059-0x1001c6182dc0000, quorum=127.0.0.1:50645, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-12T22:37:40,774 INFO [M:0;1aef280cf0a8:35059 {}] regionserver.HRegionServer(1307): Exiting; stopping=1aef280cf0a8,35059,1734042872477; zookeeper connection closed.
2024-12-12T22:37:40,788 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-12T22:37:40,800 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T22:37:40,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T22:37:40,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T22:37:40,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.log.dir/,STOPPED}
2024-12-12T22:37:40,814 WARN [BP-1705391202-172.17.0.2-1734042868369 heartbeating to localhost/127.0.0.1:41151 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-12T22:37:40,814 WARN [BP-1705391202-172.17.0.2-1734042868369 heartbeating to localhost/127.0.0.1:41151 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1705391202-172.17.0.2-1734042868369 (Datanode Uuid 348a55b3-053a-4753-a6e7-a80f38e05dff) service to localhost/127.0.0.1:41151
2024-12-12T22:37:40,815 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-12T22:37:40,815 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-12T22:37:40,821 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/dfs/data/data1/current/BP-1705391202-172.17.0.2-1734042868369 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T22:37:40,821 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/cluster_bd840f93-ddfb-b4ff-4af4-c53b508a9912/dfs/data/data2/current/BP-1705391202-172.17.0.2-1734042868369 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-12T22:37:40,822 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-12T22:37:40,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-12T22:37:40,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-12T22:37:40,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-12T22:37:40,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-12T22:37:40,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/49a5a9c9-8fad-7f83-177a-53e0f46eaa00/hadoop.log.dir/,STOPPED}
2024-12-12T22:37:40,894 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-12T22:37:41,100 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down